xref: /linux/fs/f2fs/super.c (revision e812928be2ee1c2744adf20ed04e0ce1e2fc5c13)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/super.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/module.h>
9 #include <linux/init.h>
10 #include <linux/fs.h>
11 #include <linux/fs_context.h>
12 #include <linux/sched/mm.h>
13 #include <linux/statfs.h>
14 #include <linux/kthread.h>
15 #include <linux/parser.h>
16 #include <linux/mount.h>
17 #include <linux/seq_file.h>
18 #include <linux/proc_fs.h>
19 #include <linux/random.h>
20 #include <linux/exportfs.h>
21 #include <linux/blkdev.h>
22 #include <linux/quotaops.h>
23 #include <linux/f2fs_fs.h>
24 #include <linux/sysfs.h>
25 #include <linux/quota.h>
26 #include <linux/unicode.h>
27 #include <linux/part_stat.h>
28 #include <linux/zstd.h>
29 #include <linux/lz4.h>
30 #include <linux/ctype.h>
31 #include <linux/fs_parser.h>
32 
33 #include "f2fs.h"
34 #include "node.h"
35 #include "segment.h"
36 #include "xattr.h"
37 #include "gc.h"
38 #include "iostat.h"
39 
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/f2fs.h>
42 
/* slab cache for f2fs private in-memory inode objects (see init_once()) */
static struct kmem_cache *f2fs_inode_cachep;
44 
45 #ifdef CONFIG_F2FS_FAULT_INJECTION
46 
/*
 * Human-readable names for every fault injection point, indexed by the
 * fault type enum; used when reporting which faults are enabled.
 */
const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]			= "kmalloc",
	[FAULT_KVMALLOC]		= "kvmalloc",
	[FAULT_PAGE_ALLOC]		= "page alloc",
	[FAULT_PAGE_GET]		= "page get",
	[FAULT_ALLOC_BIO]		= "alloc bio(obsolete)",
	[FAULT_ALLOC_NID]		= "alloc nid",
	[FAULT_ORPHAN]			= "orphan",
	[FAULT_BLOCK]			= "no more block",
	[FAULT_DIR_DEPTH]		= "too big dir depth",
	[FAULT_EVICT_INODE]		= "evict_inode fail",
	[FAULT_TRUNCATE]		= "truncate fail",
	[FAULT_READ_IO]			= "read IO error",
	[FAULT_CHECKPOINT]		= "checkpoint error",
	[FAULT_DISCARD]			= "discard error",
	[FAULT_WRITE_IO]		= "write IO error",
	[FAULT_SLAB_ALLOC]		= "slab alloc",
	[FAULT_DQUOT_INIT]		= "dquot initialize",
	[FAULT_LOCK_OP]			= "lock_op",
	[FAULT_BLKADDR_VALIDITY]	= "invalid blkaddr",
	[FAULT_BLKADDR_CONSISTENCE]	= "inconsistent blkaddr",
	[FAULT_NO_SEGMENT]		= "no free segment",
	[FAULT_INCONSISTENT_FOOTER]	= "inconsistent footer",
	[FAULT_TIMEOUT]			= "timeout",
	[FAULT_VMALLOC]			= "vmalloc",
};
73 
74 int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
75 				unsigned long type, enum fault_option fo)
76 {
77 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
78 
79 	if (fo & FAULT_ALL) {
80 		memset(ffi, 0, sizeof(struct f2fs_fault_info));
81 		return 0;
82 	}
83 
84 	if (fo & FAULT_RATE) {
85 		if (rate > INT_MAX)
86 			return -EINVAL;
87 		atomic_set(&ffi->inject_ops, 0);
88 		ffi->inject_rate = (int)rate;
89 		f2fs_info(sbi, "build fault injection rate: %lu", rate);
90 	}
91 
92 	if (fo & FAULT_TYPE) {
93 		if (type >= BIT(FAULT_MAX))
94 			return -EINVAL;
95 		ffi->inject_type = (unsigned int)type;
96 		f2fs_info(sbi, "build fault injection type: 0x%lx", type);
97 	}
98 
99 	return 0;
100 }
101 #endif
102 
/* f2fs-wide shrinker description, allocated/registered at module init */
static struct shrinker *f2fs_shrinker_info;
105 
106 static int __init f2fs_init_shrinker(void)
107 {
108 	f2fs_shrinker_info = shrinker_alloc(0, "f2fs-shrinker");
109 	if (!f2fs_shrinker_info)
110 		return -ENOMEM;
111 
112 	f2fs_shrinker_info->count_objects = f2fs_shrink_count;
113 	f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;
114 
115 	shrinker_register(f2fs_shrinker_info);
116 
117 	return 0;
118 }
119 
/* Unregister and free the module-wide shrinker at module exit. */
static void f2fs_exit_shrinker(void)
{
	shrinker_free(f2fs_shrinker_info);
}
124 
/*
 * Token IDs for mount options.  Most are produced by fs_parse() against
 * f2fs_param_specs[]; the Opt_checkpoint_* values are produced by the
 * match_token() pass over "checkpoint=" arguments (f2fs_checkpoint_tokens).
 */
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_acl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_flush_merge,
	Opt_barrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_reserve_node,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_quota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_nocompress_extension,
	Opt_compress_extension,
	Opt_compress_chksum,
	Opt_compress_mode,
	Opt_compress_cache,
	Opt_atgc,
	Opt_gc_merge,
	Opt_discard_unit,
	Opt_memory_mode,
	Opt_age_extent_cache,
	Opt_errors,
	Opt_nat_bits,
	Opt_jqfmt,
	Opt_checkpoint,
	Opt_lookup_mode,
	Opt_err,
};
188 
/*
 * Allowed string values for each enum-style mount option; fs_parse()
 * maps the string on the left to the constant on the right.
 */
static const struct constant_table f2fs_param_background_gc[] = {
	{"on",		BGGC_MODE_ON},
	{"off",		BGGC_MODE_OFF},
	{"sync",	BGGC_MODE_SYNC},
	{}
};

static const struct constant_table f2fs_param_mode[] = {
	{"adaptive",		FS_MODE_ADAPTIVE},
	{"lfs",			FS_MODE_LFS},
	{"fragment:segment",	FS_MODE_FRAGMENT_SEG},
	{"fragment:block",	FS_MODE_FRAGMENT_BLK},
	{}
};

/* quota journal formats */
static const struct constant_table f2fs_param_jqfmt[] = {
	{"vfsold",	QFMT_VFS_OLD},
	{"vfsv0",	QFMT_VFS_V0},
	{"vfsv1",	QFMT_VFS_V1},
	{}
};

static const struct constant_table f2fs_param_alloc_mode[] = {
	{"default",	ALLOC_MODE_DEFAULT},
	{"reuse",	ALLOC_MODE_REUSE},
	{}
};
static const struct constant_table f2fs_param_fsync_mode[] = {
	{"posix",	FSYNC_MODE_POSIX},
	{"strict",	FSYNC_MODE_STRICT},
	{"nobarrier",	FSYNC_MODE_NOBARRIER},
	{}
};

static const struct constant_table f2fs_param_compress_mode[] = {
	{"fs",		COMPR_MODE_FS},
	{"user",	COMPR_MODE_USER},
	{}
};

static const struct constant_table f2fs_param_discard_unit[] = {
	{"block",	DISCARD_UNIT_BLOCK},
	{"segment",	DISCARD_UNIT_SEGMENT},
	{"section",	DISCARD_UNIT_SECTION},
	{}
};

static const struct constant_table f2fs_param_memory_mode[] = {
	{"normal",	MEMORY_MODE_NORMAL},
	{"low",		MEMORY_MODE_LOW},
	{}
};

/* behavior on filesystem errors (errors= mount option) */
static const struct constant_table f2fs_param_errors[] = {
	{"remount-ro",	MOUNT_ERRORS_READONLY},
	{"continue",	MOUNT_ERRORS_CONTINUE},
	{"panic",	MOUNT_ERRORS_PANIC},
	{}
};

static const struct constant_table f2fs_param_lookup_mode[] = {
	{"perf",	LOOKUP_PERF},
	{"compat",	LOOKUP_COMPAT},
	{"auto",	LOOKUP_AUTO},
	{}
};
255 
/*
 * Table of all mount options understood by the new mount API parser.
 * Note: test_dummy_encryption appears twice on purpose — once taking a
 * string value ("=v1"/"=v2") and once as a bare flag.
 */
static const struct fs_parameter_spec f2fs_param_specs[] = {
	fsparam_enum("background_gc", Opt_gc_background, f2fs_param_background_gc),
	fsparam_flag("disable_roll_forward", Opt_disable_roll_forward),
	fsparam_flag("norecovery", Opt_norecovery),
	fsparam_flag_no("discard", Opt_discard),
	fsparam_flag("no_heap", Opt_noheap),
	fsparam_flag("heap", Opt_heap),
	fsparam_flag_no("user_xattr", Opt_user_xattr),
	fsparam_flag_no("acl", Opt_acl),
	fsparam_s32("active_logs", Opt_active_logs),
	fsparam_flag("disable_ext_identify", Opt_disable_ext_identify),
	fsparam_flag_no("inline_xattr", Opt_inline_xattr),
	fsparam_s32("inline_xattr_size", Opt_inline_xattr_size),
	fsparam_flag_no("inline_data", Opt_inline_data),
	fsparam_flag_no("inline_dentry", Opt_inline_dentry),
	fsparam_flag_no("flush_merge", Opt_flush_merge),
	fsparam_flag_no("barrier", Opt_barrier),
	fsparam_flag("fastboot", Opt_fastboot),
	fsparam_flag_no("extent_cache", Opt_extent_cache),
	fsparam_flag("data_flush", Opt_data_flush),
	fsparam_u32("reserve_root", Opt_reserve_root),
	fsparam_u32("reserve_node", Opt_reserve_node),
	fsparam_gid("resgid", Opt_resgid),
	fsparam_uid("resuid", Opt_resuid),
	fsparam_enum("mode", Opt_mode, f2fs_param_mode),
	fsparam_s32("fault_injection", Opt_fault_injection),
	fsparam_u32("fault_type", Opt_fault_type),
	fsparam_flag_no("lazytime", Opt_lazytime),
	fsparam_flag_no("quota", Opt_quota),
	fsparam_flag("usrquota", Opt_usrquota),
	fsparam_flag("grpquota", Opt_grpquota),
	fsparam_flag("prjquota", Opt_prjquota),
	fsparam_string_empty("usrjquota", Opt_usrjquota),
	fsparam_string_empty("grpjquota", Opt_grpjquota),
	fsparam_string_empty("prjjquota", Opt_prjjquota),
	fsparam_flag("nat_bits", Opt_nat_bits),
	fsparam_enum("jqfmt", Opt_jqfmt, f2fs_param_jqfmt),
	fsparam_enum("alloc_mode", Opt_alloc, f2fs_param_alloc_mode),
	fsparam_enum("fsync_mode", Opt_fsync, f2fs_param_fsync_mode),
	fsparam_string("test_dummy_encryption", Opt_test_dummy_encryption),
	fsparam_flag("test_dummy_encryption", Opt_test_dummy_encryption),
	fsparam_flag("inlinecrypt", Opt_inlinecrypt),
	fsparam_string("checkpoint", Opt_checkpoint),
	fsparam_flag_no("checkpoint_merge", Opt_checkpoint_merge),
	fsparam_string("compress_algorithm", Opt_compress_algorithm),
	fsparam_u32("compress_log_size", Opt_compress_log_size),
	fsparam_string("compress_extension", Opt_compress_extension),
	fsparam_string("nocompress_extension", Opt_nocompress_extension),
	fsparam_flag("compress_chksum", Opt_compress_chksum),
	fsparam_enum("compress_mode", Opt_compress_mode, f2fs_param_compress_mode),
	fsparam_flag("compress_cache", Opt_compress_cache),
	fsparam_flag("atgc", Opt_atgc),
	fsparam_flag_no("gc_merge", Opt_gc_merge),
	fsparam_enum("discard_unit", Opt_discard_unit, f2fs_param_discard_unit),
	fsparam_enum("memory", Opt_memory_mode, f2fs_param_memory_mode),
	fsparam_flag("age_extent_cache", Opt_age_extent_cache),
	fsparam_enum("errors", Opt_errors, f2fs_param_errors),
	fsparam_enum("lookup_mode", Opt_lookup_mode, f2fs_param_lookup_mode),
	{}
};
316 
/*
 * Resort to a match_table for this interestingly formatted option:
 * "checkpoint=" takes "enable", "disable", "disable:<blocks>" or
 * "disable:<percent>%" which fs_parse() cannot express directly.
 */
static match_table_t f2fs_checkpoint_tokens = {
	{Opt_checkpoint_disable, "disable"},
	{Opt_checkpoint_disable_cap, "disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "disable:%u%%"},
	{Opt_checkpoint_enable, "enable"},
	{Opt_err, NULL},
};
325 
/*
 * Bits for f2fs_fs_context::spec_mask, one per value-carrying option,
 * recording which options were explicitly specified during parsing.
 */
#define F2FS_SPEC_background_gc			(1 << 0)
#define F2FS_SPEC_inline_xattr_size		(1 << 1)
#define F2FS_SPEC_active_logs			(1 << 2)
#define F2FS_SPEC_reserve_root			(1 << 3)
#define F2FS_SPEC_resgid			(1 << 4)
#define F2FS_SPEC_resuid			(1 << 5)
#define F2FS_SPEC_mode				(1 << 6)
#define F2FS_SPEC_fault_injection		(1 << 7)
#define F2FS_SPEC_fault_type			(1 << 8)
#define F2FS_SPEC_jqfmt				(1 << 9)
#define F2FS_SPEC_alloc_mode			(1 << 10)
#define F2FS_SPEC_fsync_mode			(1 << 11)
#define F2FS_SPEC_checkpoint_disable_cap	(1 << 12)
#define F2FS_SPEC_checkpoint_disable_cap_perc	(1 << 13)
#define F2FS_SPEC_compress_level		(1 << 14)
#define F2FS_SPEC_compress_algorithm		(1 << 15)
#define F2FS_SPEC_compress_log_size		(1 << 16)
#define F2FS_SPEC_compress_extension		(1 << 17)
#define F2FS_SPEC_nocompress_extension		(1 << 18)
#define F2FS_SPEC_compress_chksum		(1 << 19)
#define F2FS_SPEC_compress_mode			(1 << 20)
#define F2FS_SPEC_discard_unit			(1 << 21)
#define F2FS_SPEC_memory_mode			(1 << 22)
#define F2FS_SPEC_errors			(1 << 23)
#define F2FS_SPEC_lookup_mode			(1 << 24)
#define F2FS_SPEC_reserve_node			(1 << 25)
352 
/* Per-mount/remount parsing state attached to fs_context::fs_private. */
struct f2fs_fs_context {
	struct f2fs_mount_info info;	/* staged option values */
	unsigned long long opt_mask;	/* Bits changed */
	unsigned int	spec_mask;	/* F2FS_SPEC_* options explicitly set */
	unsigned short	qname_mask;	/* per-qtype: quota file name touched */
};

/* Shorthand for the staged mount info inside a parsing context. */
#define F2FS_CTX_INFO(ctx)	((ctx)->info)
361 
/* Set @flag in the staged options and record that it was touched. */
static inline void ctx_set_opt(struct f2fs_fs_context *ctx,
			       enum f2fs_mount_opt flag)
{
	ctx->info.opt |= BIT(flag);
	ctx->opt_mask |= BIT(flag);
}
368 
/* Clear @flag in the staged options and record that it was touched. */
static inline void ctx_clear_opt(struct f2fs_fs_context *ctx,
				 enum f2fs_mount_opt flag)
{
	ctx->info.opt &= ~BIT(flag);
	ctx->opt_mask |= BIT(flag);
}
375 
376 static inline bool ctx_test_opt(struct f2fs_fs_context *ctx,
377 				enum f2fs_mount_opt flag)
378 {
379 	return ctx->info.opt & BIT(flag);
380 }
381 
382 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
383 					const char *fmt, ...)
384 {
385 	struct va_format vaf;
386 	va_list args;
387 	int level;
388 
389 	va_start(args, fmt);
390 
391 	level = printk_get_level(fmt);
392 	vaf.fmt = printk_skip_level(fmt);
393 	vaf.va = &args;
394 	if (limit_rate)
395 		if (sbi)
396 			printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
397 				KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
398 		else
399 			printk_ratelimited("%c%cF2FS-fs: %pV\n",
400 				KERN_SOH_ASCII, level, &vaf);
401 	else
402 		if (sbi)
403 			printk("%c%cF2FS-fs (%s): %pV\n",
404 				KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
405 		else
406 			printk("%c%cF2FS-fs: %pV\n",
407 				KERN_SOH_ASCII, level, &vaf);
408 
409 	va_end(args);
410 }
411 
412 #if IS_ENABLED(CONFIG_UNICODE)
/* Map of on-disk encoding magic values to unicode encoding descriptors. */
static const struct f2fs_sb_encodings {
	__u16 magic;		/* value stored in sb->s_encoding */
	char *name;
	unsigned int version;	/* packed unicode version */
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};
420 
421 static const struct f2fs_sb_encodings *
422 f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
423 {
424 	__u16 magic = le16_to_cpu(sb->s_encoding);
425 	int i;
426 
427 	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
428 		if (magic == f2fs_sb_encoding_map[i].magic)
429 			return &f2fs_sb_encoding_map[i];
430 
431 	return NULL;
432 }
433 
434 struct kmem_cache *f2fs_cf_name_slab;
435 static int __init f2fs_create_casefold_cache(void)
436 {
437 	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
438 						   F2FS_NAME_LEN);
439 	return f2fs_cf_name_slab ? 0 : -ENOMEM;
440 }
441 
/* Tear down the casefolded-name slab cache. */
static void f2fs_destroy_casefold_cache(void)
{
	kmem_cache_destroy(f2fs_cf_name_slab);
}
446 #else
/* !CONFIG_UNICODE: no casefolding, so the name cache is a no-op. */
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
449 #endif
450 
/*
 * Clamp the root-reserved block and node counts to at most 12.5% of the
 * available pool, and warn when resuid/resgid were given without any
 * reservation option (they would have no effect).
 */
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	/* never reserve more than what is actually free */
	block_t block_limit = min((sbi->user_block_count >> 3),
			sbi->user_block_count - sbi->reserved_blocks);
	block_t node_limit = sbi->total_node_count >> 3;

	/* limit is 12.5% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > block_limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = block_limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (test_opt(sbi, RESERVE_NODE) &&
			F2FS_OPTION(sbi).root_reserved_nodes > node_limit) {
		F2FS_OPTION(sbi).root_reserved_nodes = node_limit;
		f2fs_info(sbi, "Reduce reserved nodes for root = %u",
			  F2FS_OPTION(sbi).root_reserved_nodes);
	}
	/* resuid/resgid only matter when some reservation is in effect */
	if (!test_opt(sbi, RESERVE_ROOT) && !test_opt(sbi, RESERVE_NODE) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root"
				" and reserve_node",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}
482 
483 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
484 {
485 	if (!F2FS_OPTION(sbi).unusable_cap_perc)
486 		return;
487 
488 	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
489 		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
490 	else
491 		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
492 					F2FS_OPTION(sbi).unusable_cap_perc;
493 
494 	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
495 			F2FS_OPTION(sbi).unusable_cap,
496 			F2FS_OPTION(sbi).unusable_cap_perc);
497 }
498 
/* Slab constructor: one-time initialization of a cached f2fs inode object. */
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
#ifdef CONFIG_FS_ENCRYPTION
	fi->i_crypt_info = NULL;
#endif
}
508 
509 #ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
/*
 * Note the name of the specified quota file.
 *
 * Stores a private copy of @param->string in the parsing context (the
 * copy is owned by the context and released by f2fs_unnote_qf_name()).
 * Specifying a different name for an already-noted qtype is an error;
 * repeating the same name is a no-op.
 */
static int f2fs_note_qf_name(struct fs_context *fc, int qtype,
			     struct fs_parameter *param)
{
	struct f2fs_fs_context *ctx = fc->fs_private;
	char *qname;

	if (param->size < 1) {
		f2fs_err(NULL, "Missing quota name");
		return -EINVAL;
	}
	/* quota files must live directly in the filesystem root */
	if (strchr(param->string, '/')) {
		f2fs_err(NULL, "quotafile must be on filesystem root");
		return -EINVAL;
	}
	if (ctx->info.s_qf_names[qtype]) {
		if (strcmp(ctx->info.s_qf_names[qtype], param->string) != 0) {
			f2fs_err(NULL, "Quota file already specified");
			return -EINVAL;
		}
		return 0;
	}

	qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
	if (!qname) {
		f2fs_err(NULL, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	F2FS_CTX_INFO(ctx).s_qf_names[qtype] = qname;
	ctx->qname_mask |= 1 << qtype;
	return 0;
}
546 
547 /*
548  * Clear the name of the specified quota file.
549  */
550 static int f2fs_unnote_qf_name(struct fs_context *fc, int qtype)
551 {
552 	struct f2fs_fs_context *ctx = fc->fs_private;
553 
554 	kfree(ctx->info.s_qf_names[qtype]);
555 	ctx->info.s_qf_names[qtype] = NULL;
556 	ctx->qname_mask |= 1 << qtype;
557 	return 0;
558 }
559 
560 static void f2fs_unnote_qf_name_all(struct fs_context *fc)
561 {
562 	int i;
563 
564 	for (i = 0; i < MAXQUOTAS; i++)
565 		f2fs_unnote_qf_name(fc, i);
566 }
567 #endif
568 
569 static int f2fs_parse_test_dummy_encryption(const struct fs_parameter *param,
570 					    struct f2fs_fs_context *ctx)
571 {
572 	int err;
573 
574 	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
575 		f2fs_warn(NULL, "test_dummy_encryption option not supported");
576 		return -EINVAL;
577 	}
578 	err = fscrypt_parse_test_dummy_encryption(param,
579 					&ctx->info.dummy_enc_policy);
580 	if (err) {
581 		if (err == -EINVAL)
582 			f2fs_warn(NULL, "Value of option \"%s\" is unrecognized",
583 				  param->key);
584 		else if (err == -EEXIST)
585 			f2fs_warn(NULL, "Conflicting test_dummy_encryption options");
586 		else
587 			f2fs_warn(NULL, "Error processing option \"%s\" [%d]",
588 				  param->key, err);
589 		return -EINVAL;
590 	}
591 	return 0;
592 }
593 
594 #ifdef CONFIG_F2FS_FS_COMPRESSION
595 static bool is_compress_extension_exist(struct f2fs_mount_info *info,
596 					const char *new_ext, bool is_ext)
597 {
598 	unsigned char (*ext)[F2FS_EXTENSION_LEN];
599 	int ext_cnt;
600 	int i;
601 
602 	if (is_ext) {
603 		ext = info->extensions;
604 		ext_cnt = info->compress_ext_cnt;
605 	} else {
606 		ext = info->noextensions;
607 		ext_cnt = info->nocompress_ext_cnt;
608 	}
609 
610 	for (i = 0; i < ext_cnt; i++) {
611 		if (!strcasecmp(new_ext, ext[i]))
612 			return true;
613 	}
614 
615 	return false;
616 }
617 
618 /*
 * 1. The same extension name cannot appear in both the compress and non-compress
 * extension lists at the same time.
621  * 2. If the compress extension specifies all files, the types specified by the non-compress
622  * extension will be treated as special cases and will not be compressed.
623  * 3. Don't allow the non-compress extension specifies all files.
624  */
625 static int f2fs_test_compress_extension(unsigned char (*noext)[F2FS_EXTENSION_LEN],
626 					int noext_cnt,
627 					unsigned char (*ext)[F2FS_EXTENSION_LEN],
628 					int ext_cnt)
629 {
630 	int index = 0, no_index = 0;
631 
632 	if (!noext_cnt)
633 		return 0;
634 
635 	for (no_index = 0; no_index < noext_cnt; no_index++) {
636 		if (strlen(noext[no_index]) == 0)
637 			continue;
638 		if (!strcasecmp("*", noext[no_index])) {
639 			f2fs_info(NULL, "Don't allow the nocompress extension specifies all files");
640 			return -EINVAL;
641 		}
642 		for (index = 0; index < ext_cnt; index++) {
643 			if (strlen(ext[index]) == 0)
644 				continue;
645 			if (!strcasecmp(ext[index], noext[no_index])) {
646 				f2fs_info(NULL, "Don't allow the same extension %s appear in both compress and nocompress extension",
647 						ext[index]);
648 				return -EINVAL;
649 			}
650 		}
651 	}
652 	return 0;
653 }
654 
655 #ifdef CONFIG_F2FS_FS_LZ4
/*
 * Parse an optional lz4hc level from a "lz4[:level]" algorithm string.
 * The caller has already matched the "lz4" prefix.  Without LZ4HC
 * support only the bare "lz4" form (no level suffix) is accepted.
 */
static int f2fs_set_lz4hc_level(struct f2fs_fs_context *ctx, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	unsigned int level;

	/* bare "lz4" (3 chars): no explicit level requested */
	if (strlen(str) == 3) {
		F2FS_CTX_INFO(ctx).compress_level = 0;
		ctx->spec_mask |= F2FS_SPEC_compress_level;
		return 0;
	}

	/* skip the "lz4" prefix already matched by the caller */
	str += 3;

	if (str[0] != ':') {
		f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
		f2fs_info(NULL, "invalid lz4hc compress level: %d", level);
		return -EINVAL;
	}

	F2FS_CTX_INFO(ctx).compress_level = level;
	ctx->spec_mask |= F2FS_SPEC_compress_level;
	return 0;
#else
	/* no LZ4HC: accept only the bare "lz4" form */
	if (strlen(str) == 3) {
		F2FS_CTX_INFO(ctx).compress_level = 0;
		ctx->spec_mask |= F2FS_SPEC_compress_level;
		return 0;
	}
	f2fs_info(NULL, "kernel doesn't support lz4hc compression");
	return -EINVAL;
#endif
}
694 #endif
695 
696 #ifdef CONFIG_F2FS_FS_ZSTD
697 static int f2fs_set_zstd_level(struct f2fs_fs_context *ctx, const char *str)
698 {
699 	int level;
700 	int len = 4;
701 
702 	if (strlen(str) == len) {
703 		F2FS_CTX_INFO(ctx).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
704 		ctx->spec_mask |= F2FS_SPEC_compress_level;
705 		return 0;
706 	}
707 
708 	str += len;
709 
710 	if (str[0] != ':') {
711 		f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>");
712 		return -EINVAL;
713 	}
714 	if (kstrtoint(str + 1, 10, &level))
715 		return -EINVAL;
716 
717 	/* f2fs does not support negative compress level now */
718 	if (level < 0) {
719 		f2fs_info(NULL, "do not support negative compress level: %d", level);
720 		return -ERANGE;
721 	}
722 
723 	if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
724 		f2fs_info(NULL, "invalid zstd compress level: %d", level);
725 		return -EINVAL;
726 	}
727 
728 	F2FS_CTX_INFO(ctx).compress_level = level;
729 	ctx->spec_mask |= F2FS_SPEC_compress_level;
730 	return 0;
731 }
732 #endif
733 #endif
734 
735 static int f2fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
736 {
737 	struct f2fs_fs_context *ctx = fc->fs_private;
738 #ifdef CONFIG_F2FS_FS_COMPRESSION
739 	unsigned char (*ext)[F2FS_EXTENSION_LEN];
740 	unsigned char (*noext)[F2FS_EXTENSION_LEN];
741 	int ext_cnt, noext_cnt;
742 	char *name;
743 #endif
744 	substring_t args[MAX_OPT_ARGS];
745 	struct fs_parse_result result;
746 	int token, ret, arg;
747 
748 	token = fs_parse(fc, f2fs_param_specs, param, &result);
749 	if (token < 0)
750 		return token;
751 
752 	switch (token) {
753 	case Opt_gc_background:
754 		F2FS_CTX_INFO(ctx).bggc_mode = result.uint_32;
755 		ctx->spec_mask |= F2FS_SPEC_background_gc;
756 		break;
757 	case Opt_disable_roll_forward:
758 		ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_ROLL_FORWARD);
759 		break;
760 	case Opt_norecovery:
761 		/* requires ro mount, checked in f2fs_validate_options */
762 		ctx_set_opt(ctx, F2FS_MOUNT_NORECOVERY);
763 		break;
764 	case Opt_discard:
765 		if (result.negated)
766 			ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
767 		else
768 			ctx_set_opt(ctx, F2FS_MOUNT_DISCARD);
769 		break;
770 	case Opt_noheap:
771 	case Opt_heap:
772 		f2fs_warn(NULL, "heap/no_heap options were deprecated");
773 		break;
774 #ifdef CONFIG_F2FS_FS_XATTR
775 	case Opt_user_xattr:
776 		if (result.negated)
777 			ctx_clear_opt(ctx, F2FS_MOUNT_XATTR_USER);
778 		else
779 			ctx_set_opt(ctx, F2FS_MOUNT_XATTR_USER);
780 		break;
781 	case Opt_inline_xattr:
782 		if (result.negated)
783 			ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
784 		else
785 			ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
786 		break;
787 	case Opt_inline_xattr_size:
788 		if (result.int_32 < MIN_INLINE_XATTR_SIZE ||
789 			result.int_32 > MAX_INLINE_XATTR_SIZE) {
790 			f2fs_err(NULL, "inline xattr size is out of range: %u ~ %u",
791 				 (u32)MIN_INLINE_XATTR_SIZE, (u32)MAX_INLINE_XATTR_SIZE);
792 			return -EINVAL;
793 		}
794 		ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE);
795 		F2FS_CTX_INFO(ctx).inline_xattr_size = result.int_32;
796 		ctx->spec_mask |= F2FS_SPEC_inline_xattr_size;
797 		break;
798 #else
799 	case Opt_user_xattr:
800 	case Opt_inline_xattr:
801 	case Opt_inline_xattr_size:
802 		f2fs_info(NULL, "%s options not supported", param->key);
803 		break;
804 #endif
805 #ifdef CONFIG_F2FS_FS_POSIX_ACL
806 	case Opt_acl:
807 		if (result.negated)
808 			ctx_clear_opt(ctx, F2FS_MOUNT_POSIX_ACL);
809 		else
810 			ctx_set_opt(ctx, F2FS_MOUNT_POSIX_ACL);
811 		break;
812 #else
813 	case Opt_acl:
814 		f2fs_info(NULL, "%s options not supported", param->key);
815 		break;
816 #endif
817 	case Opt_active_logs:
818 		if (result.int_32 != 2 && result.int_32 != 4 &&
819 			result.int_32 != NR_CURSEG_PERSIST_TYPE)
820 			return -EINVAL;
821 		ctx->spec_mask |= F2FS_SPEC_active_logs;
822 		F2FS_CTX_INFO(ctx).active_logs = result.int_32;
823 		break;
824 	case Opt_disable_ext_identify:
825 		ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_EXT_IDENTIFY);
826 		break;
827 	case Opt_inline_data:
828 		if (result.negated)
829 			ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DATA);
830 		else
831 			ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DATA);
832 		break;
833 	case Opt_inline_dentry:
834 		if (result.negated)
835 			ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
836 		else
837 			ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
838 		break;
839 	case Opt_flush_merge:
840 		if (result.negated)
841 			ctx_clear_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
842 		else
843 			ctx_set_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
844 		break;
845 	case Opt_barrier:
846 		if (result.negated)
847 			ctx_set_opt(ctx, F2FS_MOUNT_NOBARRIER);
848 		else
849 			ctx_clear_opt(ctx, F2FS_MOUNT_NOBARRIER);
850 		break;
851 	case Opt_fastboot:
852 		ctx_set_opt(ctx, F2FS_MOUNT_FASTBOOT);
853 		break;
854 	case Opt_extent_cache:
855 		if (result.negated)
856 			ctx_clear_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
857 		else
858 			ctx_set_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
859 		break;
860 	case Opt_data_flush:
861 		ctx_set_opt(ctx, F2FS_MOUNT_DATA_FLUSH);
862 		break;
863 	case Opt_reserve_root:
864 		ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
865 		F2FS_CTX_INFO(ctx).root_reserved_blocks = result.uint_32;
866 		ctx->spec_mask |= F2FS_SPEC_reserve_root;
867 		break;
868 	case Opt_reserve_node:
869 		ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
870 		F2FS_CTX_INFO(ctx).root_reserved_nodes = result.uint_32;
871 		ctx->spec_mask |= F2FS_SPEC_reserve_node;
872 		break;
873 	case Opt_resuid:
874 		F2FS_CTX_INFO(ctx).s_resuid = result.uid;
875 		ctx->spec_mask |= F2FS_SPEC_resuid;
876 		break;
877 	case Opt_resgid:
878 		F2FS_CTX_INFO(ctx).s_resgid = result.gid;
879 		ctx->spec_mask |= F2FS_SPEC_resgid;
880 		break;
881 	case Opt_mode:
882 		F2FS_CTX_INFO(ctx).fs_mode = result.uint_32;
883 		ctx->spec_mask |= F2FS_SPEC_mode;
884 		break;
885 #ifdef CONFIG_F2FS_FAULT_INJECTION
886 	case Opt_fault_injection:
887 		F2FS_CTX_INFO(ctx).fault_info.inject_rate = result.int_32;
888 		ctx->spec_mask |= F2FS_SPEC_fault_injection;
889 		ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
890 		break;
891 
892 	case Opt_fault_type:
893 		if (result.uint_32 > BIT(FAULT_MAX))
894 			return -EINVAL;
895 		F2FS_CTX_INFO(ctx).fault_info.inject_type = result.uint_32;
896 		ctx->spec_mask |= F2FS_SPEC_fault_type;
897 		ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
898 		break;
899 #else
900 	case Opt_fault_injection:
901 	case Opt_fault_type:
902 		f2fs_info(NULL, "%s options not supported", param->key);
903 		break;
904 #endif
905 	case Opt_lazytime:
906 		if (result.negated)
907 			ctx_clear_opt(ctx, F2FS_MOUNT_LAZYTIME);
908 		else
909 			ctx_set_opt(ctx, F2FS_MOUNT_LAZYTIME);
910 		break;
911 #ifdef CONFIG_QUOTA
912 	case Opt_quota:
913 		if (result.negated) {
914 			ctx_clear_opt(ctx, F2FS_MOUNT_QUOTA);
915 			ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
916 			ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
917 			ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
918 		} else
919 			ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
920 		break;
921 	case Opt_usrquota:
922 		ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
923 		break;
924 	case Opt_grpquota:
925 		ctx_set_opt(ctx, F2FS_MOUNT_GRPQUOTA);
926 		break;
927 	case Opt_prjquota:
928 		ctx_set_opt(ctx, F2FS_MOUNT_PRJQUOTA);
929 		break;
930 	case Opt_usrjquota:
931 		if (!*param->string)
932 			ret = f2fs_unnote_qf_name(fc, USRQUOTA);
933 		else
934 			ret = f2fs_note_qf_name(fc, USRQUOTA, param);
935 		if (ret)
936 			return ret;
937 		break;
938 	case Opt_grpjquota:
939 		if (!*param->string)
940 			ret = f2fs_unnote_qf_name(fc, GRPQUOTA);
941 		else
942 			ret = f2fs_note_qf_name(fc, GRPQUOTA, param);
943 		if (ret)
944 			return ret;
945 		break;
946 	case Opt_prjjquota:
947 		if (!*param->string)
948 			ret = f2fs_unnote_qf_name(fc, PRJQUOTA);
949 		else
950 			ret = f2fs_note_qf_name(fc, PRJQUOTA, param);
951 		if (ret)
952 			return ret;
953 		break;
954 	case Opt_jqfmt:
955 		F2FS_CTX_INFO(ctx).s_jquota_fmt = result.int_32;
956 		ctx->spec_mask |= F2FS_SPEC_jqfmt;
957 		break;
958 #else
959 	case Opt_quota:
960 	case Opt_usrquota:
961 	case Opt_grpquota:
962 	case Opt_prjquota:
963 	case Opt_usrjquota:
964 	case Opt_grpjquota:
965 	case Opt_prjjquota:
966 		f2fs_info(NULL, "quota operations not supported");
967 		break;
968 #endif
969 	case Opt_alloc:
970 		F2FS_CTX_INFO(ctx).alloc_mode = result.uint_32;
971 		ctx->spec_mask |= F2FS_SPEC_alloc_mode;
972 		break;
973 	case Opt_fsync:
974 		F2FS_CTX_INFO(ctx).fsync_mode = result.uint_32;
975 		ctx->spec_mask |= F2FS_SPEC_fsync_mode;
976 		break;
977 	case Opt_test_dummy_encryption:
978 		ret = f2fs_parse_test_dummy_encryption(param, ctx);
979 		if (ret)
980 			return ret;
981 		break;
982 	case Opt_inlinecrypt:
983 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
984 		ctx_set_opt(ctx, F2FS_MOUNT_INLINECRYPT);
985 #else
986 		f2fs_info(NULL, "inline encryption not supported");
987 #endif
988 		break;
989 	case Opt_checkpoint:
990 		/*
991 		 * Initialize args struct so we know whether arg was
992 		 * found; some options take optional arguments.
993 		 */
994 		args[0].from = args[0].to = NULL;
995 		arg = 0;
996 
997 		/* revert to match_table for checkpoint= options */
998 		token = match_token(param->string, f2fs_checkpoint_tokens, args);
999 		switch (token) {
1000 		case Opt_checkpoint_disable_cap_perc:
1001 			if (args->from && match_int(args, &arg))
1002 				return -EINVAL;
1003 			if (arg < 0 || arg > 100)
1004 				return -EINVAL;
1005 			F2FS_CTX_INFO(ctx).unusable_cap_perc = arg;
1006 			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
1007 			ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
1008 			break;
1009 		case Opt_checkpoint_disable_cap:
1010 			if (args->from && match_int(args, &arg))
1011 				return -EINVAL;
1012 			F2FS_CTX_INFO(ctx).unusable_cap = arg;
1013 			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
1014 			ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
1015 			break;
1016 		case Opt_checkpoint_disable:
1017 			ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
1018 			break;
1019 		case Opt_checkpoint_enable:
1020 			F2FS_CTX_INFO(ctx).unusable_cap_perc = 0;
1021 			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
1022 			F2FS_CTX_INFO(ctx).unusable_cap = 0;
1023 			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
1024 			ctx_clear_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
1025 			break;
1026 		default:
1027 			return -EINVAL;
1028 		}
1029 		break;
1030 	case Opt_checkpoint_merge:
1031 		if (result.negated)
1032 			ctx_clear_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
1033 		else
1034 			ctx_set_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
1035 		break;
1036 #ifdef CONFIG_F2FS_FS_COMPRESSION
1037 	case Opt_compress_algorithm:
1038 		name = param->string;
1039 		if (!strcmp(name, "lzo")) {
1040 #ifdef CONFIG_F2FS_FS_LZO
1041 			F2FS_CTX_INFO(ctx).compress_level = 0;
1042 			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZO;
1043 			ctx->spec_mask |= F2FS_SPEC_compress_level;
1044 			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
1045 #else
1046 			f2fs_info(NULL, "kernel doesn't support lzo compression");
1047 #endif
1048 		} else if (!strncmp(name, "lz4", 3)) {
1049 #ifdef CONFIG_F2FS_FS_LZ4
1050 			ret = f2fs_set_lz4hc_level(ctx, name);
1051 			if (ret)
1052 				return -EINVAL;
1053 			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZ4;
1054 			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
1055 #else
1056 			f2fs_info(NULL, "kernel doesn't support lz4 compression");
1057 #endif
1058 		} else if (!strncmp(name, "zstd", 4)) {
1059 #ifdef CONFIG_F2FS_FS_ZSTD
1060 			ret = f2fs_set_zstd_level(ctx, name);
1061 			if (ret)
1062 				return -EINVAL;
1063 			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_ZSTD;
1064 			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
1065 #else
1066 			f2fs_info(NULL, "kernel doesn't support zstd compression");
1067 #endif
1068 		} else if (!strcmp(name, "lzo-rle")) {
1069 #ifdef CONFIG_F2FS_FS_LZORLE
1070 			F2FS_CTX_INFO(ctx).compress_level = 0;
1071 			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZORLE;
1072 			ctx->spec_mask |= F2FS_SPEC_compress_level;
1073 			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
1074 #else
1075 			f2fs_info(NULL, "kernel doesn't support lzorle compression");
1076 #endif
1077 		} else
1078 			return -EINVAL;
1079 		break;
1080 	case Opt_compress_log_size:
1081 		if (result.uint_32 < MIN_COMPRESS_LOG_SIZE ||
1082 		    result.uint_32 > MAX_COMPRESS_LOG_SIZE) {
1083 			f2fs_err(NULL,
1084 				"Compress cluster log size is out of range");
1085 			return -EINVAL;
1086 		}
1087 		F2FS_CTX_INFO(ctx).compress_log_size = result.uint_32;
1088 		ctx->spec_mask |= F2FS_SPEC_compress_log_size;
1089 		break;
1090 	case Opt_compress_extension:
1091 		name = param->string;
1092 		ext = F2FS_CTX_INFO(ctx).extensions;
1093 		ext_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
1094 
1095 		if (strlen(name) >= F2FS_EXTENSION_LEN ||
1096 		    ext_cnt >= COMPRESS_EXT_NUM) {
1097 			f2fs_err(NULL, "invalid extension length/number");
1098 			return -EINVAL;
1099 		}
1100 
1101 		if (is_compress_extension_exist(&ctx->info, name, true))
1102 			break;
1103 
1104 		ret = strscpy(ext[ext_cnt], name, F2FS_EXTENSION_LEN);
1105 		if (ret < 0)
1106 			return ret;
1107 		F2FS_CTX_INFO(ctx).compress_ext_cnt++;
1108 		ctx->spec_mask |= F2FS_SPEC_compress_extension;
1109 		break;
1110 	case Opt_nocompress_extension:
1111 		name = param->string;
1112 		noext = F2FS_CTX_INFO(ctx).noextensions;
1113 		noext_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
1114 
1115 		if (strlen(name) >= F2FS_EXTENSION_LEN ||
1116 			noext_cnt >= COMPRESS_EXT_NUM) {
1117 			f2fs_err(NULL, "invalid extension length/number");
1118 			return -EINVAL;
1119 		}
1120 
1121 		if (is_compress_extension_exist(&ctx->info, name, false))
1122 			break;
1123 
1124 		ret = strscpy(noext[noext_cnt], name, F2FS_EXTENSION_LEN);
1125 		if (ret < 0)
1126 			return ret;
1127 		F2FS_CTX_INFO(ctx).nocompress_ext_cnt++;
1128 		ctx->spec_mask |= F2FS_SPEC_nocompress_extension;
1129 		break;
1130 	case Opt_compress_chksum:
1131 		F2FS_CTX_INFO(ctx).compress_chksum = true;
1132 		ctx->spec_mask |= F2FS_SPEC_compress_chksum;
1133 		break;
1134 	case Opt_compress_mode:
1135 		F2FS_CTX_INFO(ctx).compress_mode = result.uint_32;
1136 		ctx->spec_mask |= F2FS_SPEC_compress_mode;
1137 		break;
1138 	case Opt_compress_cache:
1139 		ctx_set_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE);
1140 		break;
1141 #else
1142 	case Opt_compress_algorithm:
1143 	case Opt_compress_log_size:
1144 	case Opt_compress_extension:
1145 	case Opt_nocompress_extension:
1146 	case Opt_compress_chksum:
1147 	case Opt_compress_mode:
1148 	case Opt_compress_cache:
1149 		f2fs_info(NULL, "compression options not supported");
1150 		break;
1151 #endif
1152 	case Opt_atgc:
1153 		ctx_set_opt(ctx, F2FS_MOUNT_ATGC);
1154 		break;
1155 	case Opt_gc_merge:
1156 		if (result.negated)
1157 			ctx_clear_opt(ctx, F2FS_MOUNT_GC_MERGE);
1158 		else
1159 			ctx_set_opt(ctx, F2FS_MOUNT_GC_MERGE);
1160 		break;
1161 	case Opt_discard_unit:
1162 		F2FS_CTX_INFO(ctx).discard_unit = result.uint_32;
1163 		ctx->spec_mask |= F2FS_SPEC_discard_unit;
1164 		break;
1165 	case Opt_memory_mode:
1166 		F2FS_CTX_INFO(ctx).memory_mode = result.uint_32;
1167 		ctx->spec_mask |= F2FS_SPEC_memory_mode;
1168 		break;
1169 	case Opt_age_extent_cache:
1170 		ctx_set_opt(ctx, F2FS_MOUNT_AGE_EXTENT_CACHE);
1171 		break;
1172 	case Opt_errors:
1173 		F2FS_CTX_INFO(ctx).errors = result.uint_32;
1174 		ctx->spec_mask |= F2FS_SPEC_errors;
1175 		break;
1176 	case Opt_nat_bits:
1177 		ctx_set_opt(ctx, F2FS_MOUNT_NAT_BITS);
1178 		break;
1179 	case Opt_lookup_mode:
1180 		F2FS_CTX_INFO(ctx).lookup_mode = result.uint_32;
1181 		ctx->spec_mask |= F2FS_SPEC_lookup_mode;
1182 		break;
1183 	}
1184 	return 0;
1185 }
1186 
1187 /*
1188  * Check quota settings consistency.
1189  */
1190 static int f2fs_check_quota_consistency(struct fs_context *fc,
1191 					struct super_block *sb)
1192 {
1193 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1194  #ifdef CONFIG_QUOTA
1195 	struct f2fs_fs_context *ctx = fc->fs_private;
1196 	bool quota_feature = f2fs_sb_has_quota_ino(sbi);
1197 	bool quota_turnon = sb_any_quota_loaded(sb);
1198 	char *old_qname, *new_qname;
1199 	bool usr_qf_name, grp_qf_name, prj_qf_name, usrquota, grpquota, prjquota;
1200 	int i;
1201 
1202 	/*
1203 	 * We do the test below only for project quotas. 'usrquota' and
1204 	 * 'grpquota' mount options are allowed even without quota feature
1205 	 * to support legacy quotas in quota files.
1206 	 */
1207 	if (ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA) &&
1208 			!f2fs_sb_has_project_quota(sbi)) {
1209 		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
1210 		return -EINVAL;
1211 	}
1212 
1213 	if (ctx->qname_mask) {
1214 		for (i = 0; i < MAXQUOTAS; i++) {
1215 			if (!(ctx->qname_mask & (1 << i)))
1216 				continue;
1217 
1218 			old_qname = F2FS_OPTION(sbi).s_qf_names[i];
1219 			new_qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
1220 			if (quota_turnon &&
1221 				!!old_qname != !!new_qname)
1222 				goto err_jquota_change;
1223 
1224 			if (old_qname) {
1225 				if (!new_qname) {
1226 					f2fs_info(sbi, "remove qf_name %s",
1227 								old_qname);
1228 					continue;
1229 				} else if (strcmp(old_qname, new_qname) == 0) {
1230 					ctx->qname_mask &= ~(1 << i);
1231 					continue;
1232 				}
1233 				goto err_jquota_specified;
1234 			}
1235 
1236 			if (quota_feature) {
1237 				f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
1238 				ctx->qname_mask &= ~(1 << i);
1239 				kfree(F2FS_CTX_INFO(ctx).s_qf_names[i]);
1240 				F2FS_CTX_INFO(ctx).s_qf_names[i] = NULL;
1241 			}
1242 		}
1243 	}
1244 
1245 	/* Make sure we don't mix old and new quota format */
1246 	usr_qf_name = F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
1247 			F2FS_CTX_INFO(ctx).s_qf_names[USRQUOTA];
1248 	grp_qf_name = F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
1249 			F2FS_CTX_INFO(ctx).s_qf_names[GRPQUOTA];
1250 	prj_qf_name = F2FS_OPTION(sbi).s_qf_names[PRJQUOTA] ||
1251 			F2FS_CTX_INFO(ctx).s_qf_names[PRJQUOTA];
1252 	usrquota = test_opt(sbi, USRQUOTA) ||
1253 			ctx_test_opt(ctx, F2FS_MOUNT_USRQUOTA);
1254 	grpquota = test_opt(sbi, GRPQUOTA) ||
1255 			ctx_test_opt(ctx, F2FS_MOUNT_GRPQUOTA);
1256 	prjquota = test_opt(sbi, PRJQUOTA) ||
1257 			ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA);
1258 
1259 	if (usr_qf_name) {
1260 		ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
1261 		usrquota = false;
1262 	}
1263 	if (grp_qf_name) {
1264 		ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
1265 		grpquota = false;
1266 	}
1267 	if (prj_qf_name) {
1268 		ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
1269 		prjquota = false;
1270 	}
1271 	if (usr_qf_name || grp_qf_name || prj_qf_name) {
1272 		if (grpquota || usrquota || prjquota) {
1273 			f2fs_err(sbi, "old and new quota format mixing");
1274 			return -EINVAL;
1275 		}
1276 		if (!(ctx->spec_mask & F2FS_SPEC_jqfmt ||
1277 				F2FS_OPTION(sbi).s_jquota_fmt)) {
1278 			f2fs_err(sbi, "journaled quota format not specified");
1279 			return -EINVAL;
1280 		}
1281 	}
1282 	return 0;
1283 
1284 err_jquota_change:
1285 	f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
1286 	return -EINVAL;
1287 err_jquota_specified:
1288 	f2fs_err(sbi, "%s quota file already specified",
1289 		 QTYPE2NAME(i));
1290 	return -EINVAL;
1291 
1292 #else
1293 	if (f2fs_readonly(sbi->sb))
1294 		return 0;
1295 	if (f2fs_sb_has_quota_ino(sbi)) {
1296 		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1297 		return -EINVAL;
1298 	}
1299 	if (f2fs_sb_has_project_quota(sbi)) {
1300 		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1301 		return -EINVAL;
1302 	}
1303 
1304 	return 0;
1305 #endif
1306 }
1307 
1308 static int f2fs_check_test_dummy_encryption(struct fs_context *fc,
1309 					    struct super_block *sb)
1310 {
1311 	struct f2fs_fs_context *ctx = fc->fs_private;
1312 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1313 
1314 	if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy))
1315 		return 0;
1316 
1317 	if (!f2fs_sb_has_encrypt(sbi)) {
1318 		f2fs_err(sbi, "Encrypt feature is off");
1319 		return -EINVAL;
1320 	}
1321 
1322 	/*
1323 	 * This mount option is just for testing, and it's not worthwhile to
1324 	 * implement the extra complexity (e.g. RCU protection) that would be
1325 	 * needed to allow it to be set or changed during remount.  We do allow
1326 	 * it to be specified during remount, but only if there is no change.
1327 	 */
1328 	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
1329 		if (fscrypt_dummy_policies_equal(&F2FS_OPTION(sbi).dummy_enc_policy,
1330 				&F2FS_CTX_INFO(ctx).dummy_enc_policy))
1331 			return 0;
1332 		f2fs_warn(sbi, "Can't set or change test_dummy_encryption on remount");
1333 		return -EINVAL;
1334 	}
1335 	return 0;
1336 }
1337 
1338 static inline bool test_compression_spec(unsigned int mask)
1339 {
1340 	return mask & (F2FS_SPEC_compress_algorithm
1341 			| F2FS_SPEC_compress_log_size
1342 			| F2FS_SPEC_compress_extension
1343 			| F2FS_SPEC_nocompress_extension
1344 			| F2FS_SPEC_compress_chksum
1345 			| F2FS_SPEC_compress_mode);
1346 }
1347 
1348 static inline void clear_compression_spec(struct f2fs_fs_context *ctx)
1349 {
1350 	ctx->spec_mask &= ~(F2FS_SPEC_compress_algorithm
1351 						| F2FS_SPEC_compress_log_size
1352 						| F2FS_SPEC_compress_extension
1353 						| F2FS_SPEC_nocompress_extension
1354 						| F2FS_SPEC_compress_chksum
1355 						| F2FS_SPEC_compress_mode);
1356 }
1357 
/*
 * Check the compression options for consistency with the on-disk feature
 * and with the extension lists already configured on @sb.  Context
 * extensions that duplicate existing ones are blanked (first byte set to
 * '\0') so the later apply step skips them.  Returns 0 on success,
 * -EINVAL on list overflow or extension/noextension conflict.
 */
static int f2fs_check_compression(struct fs_context *fc,
				  struct super_block *sb)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i, cnt;

	/* Image lacks the compression feature: silently drop the options. */
	if (!f2fs_sb_has_compression(sbi)) {
		if (test_compression_spec(ctx->spec_mask) ||
			ctx_test_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE))
			f2fs_info(sbi, "Image doesn't support compression");
		clear_compression_spec(ctx);
		ctx->opt_mask &= ~BIT(F2FS_MOUNT_COMPRESS_CACHE);
		return 0;
	}
	if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
		/* cnt counts the new extensions that survive deduplication. */
		cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
		for (i = 0; i < F2FS_CTX_INFO(ctx).compress_ext_cnt; i++) {
			if (is_compress_extension_exist(&F2FS_OPTION(sbi),
					F2FS_CTX_INFO(ctx).extensions[i], true)) {
				/* Already configured on sbi: blank it out. */
				F2FS_CTX_INFO(ctx).extensions[i][0] = '\0';
				cnt--;
			}
		}
		if (F2FS_OPTION(sbi).compress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
			f2fs_err(sbi, "invalid extension length/number");
			return -EINVAL;
		}
	}
	if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
		/* Same dedup-and-count scheme for the nocompress list. */
		cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
		for (i = 0; i < F2FS_CTX_INFO(ctx).nocompress_ext_cnt; i++) {
			if (is_compress_extension_exist(&F2FS_OPTION(sbi),
					F2FS_CTX_INFO(ctx).noextensions[i], false)) {
				F2FS_CTX_INFO(ctx).noextensions[i][0] = '\0';
				cnt--;
			}
		}
		if (F2FS_OPTION(sbi).nocompress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
			f2fs_err(sbi, "invalid noextension length/number");
			return -EINVAL;
		}
	}

	/* Conflicts: new-vs-new, new noext-vs-old ext, old noext-vs-new ext. */
	if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
				F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
				F2FS_CTX_INFO(ctx).extensions,
				F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
		f2fs_err(sbi, "new noextensions conflicts with new extensions");
		return -EINVAL;
	}
	if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
				F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
				F2FS_OPTION(sbi).extensions,
				F2FS_OPTION(sbi).compress_ext_cnt)) {
		f2fs_err(sbi, "new noextensions conflicts with old extensions");
		return -EINVAL;
	}
	if (f2fs_test_compress_extension(F2FS_OPTION(sbi).noextensions,
				F2FS_OPTION(sbi).nocompress_ext_cnt,
				F2FS_CTX_INFO(ctx).extensions,
				F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
		f2fs_err(sbi, "new extensions conflicts with old noextensions");
		return -EINVAL;
	}
#endif
	return 0;
}
1427 
/*
 * Cross-check the staged mount options against each other, the hardware
 * and the superblock features.  Some incompatible-but-harmless options
 * are quietly dropped from the context (with an info/warn message);
 * real conflicts return -EINVAL (or -EROFS for the read-only image case).
 */
static int f2fs_check_opt_consistency(struct fs_context *fc,
				      struct super_block *sb)
{
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err;

	/* norecovery only makes sense on a read-only mount. */
	if (ctx_test_opt(ctx, F2FS_MOUNT_NORECOVERY) && !f2fs_readonly(sb))
		return -EINVAL;

	/* Explicit nodiscard is rejected when the hardware mandates discard. */
	if (f2fs_hw_should_discard(sbi) &&
			(ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
			!ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
		f2fs_warn(sbi, "discard is required for zoned block devices");
		return -EINVAL;
	}

	/* Drop discard= when the device cannot do it at all. */
	if (!f2fs_hw_support_discard(sbi) &&
			(ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
			ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
		f2fs_warn(sbi, "device does not support discard");
		ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
		ctx->opt_mask &= ~BIT(F2FS_MOUNT_DISCARD);
	}

	if (f2fs_sb_has_device_alias(sbi) &&
			(ctx->opt_mask & BIT(F2FS_MOUNT_READ_EXTENT_CACHE)) &&
			!ctx_test_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE)) {
		f2fs_err(sbi, "device aliasing requires extent cache");
		return -EINVAL;
	}

	/* An already-configured reserve_root wins over a new request. */
	if (test_opt(sbi, RESERVE_ROOT) &&
			(ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_ROOT)) &&
			ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_ROOT)) {
		f2fs_info(sbi, "Preserve previous reserve_root=%u",
			F2FS_OPTION(sbi).root_reserved_blocks);
		ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
		ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_ROOT);
	}
	if (test_opt(sbi, RESERVE_NODE) &&
			(ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_NODE)) &&
			ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_NODE)) {
		f2fs_info(sbi, "Preserve previous reserve_node=%u",
			F2FS_OPTION(sbi).root_reserved_nodes);
		ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
		ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_NODE);
	}

	err = f2fs_check_test_dummy_encryption(fc, sb);
	if (err)
		return err;

	err = f2fs_check_compression(fc, sb);
	if (err)
		return err;

	err = f2fs_check_quota_consistency(fc, sb);
	if (err)
		return err;

	if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
	if (f2fs_sb_has_blkzoned(sbi)) {
		if (F2FS_CTX_INFO(ctx).bggc_mode == BGGC_MODE_OFF) {
			f2fs_warn(sbi, "zoned devices need bggc");
			return -EINVAL;
		}
#ifdef CONFIG_BLK_DEV_ZONED
		/* Zoned devices get section-granular discard and LFS mode. */
		if ((ctx->spec_mask & F2FS_SPEC_discard_unit) &&
		F2FS_CTX_INFO(ctx).discard_unit != DISCARD_UNIT_SECTION) {
			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
			F2FS_CTX_INFO(ctx).discard_unit = DISCARD_UNIT_SECTION;
		}

		if ((ctx->spec_mask & F2FS_SPEC_mode) &&
		F2FS_CTX_INFO(ctx).fs_mode != FS_MODE_LFS) {
			f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
			return -EINVAL;
		}
#else
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
#endif
	}

	/* inline_xattr_size needs both the features and inline_xattr itself. */
	if (ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE)) {
		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR) && !test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}
	}

	if (ctx_test_opt(ctx, F2FS_MOUNT_ATGC) &&
	    F2FS_CTX_INFO(ctx).fs_mode == FS_MODE_LFS) {
		f2fs_err(sbi, "LFS is not compatible with ATGC");
		return -EINVAL;
	}

	if (f2fs_is_readonly(sbi) && ctx_test_opt(ctx, F2FS_MOUNT_FLUSH_MERGE)) {
		f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
		return -EINVAL;
	}

	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Allow to mount readonly mode only");
		return -EROFS;
	}
	return 0;
}
1552 
/*
 * Apply the journaled quota options staged in the fs_context to the live
 * superblock options.  Only called after f2fs_check_quota_consistency()
 * succeeded, so every remaining qname_mask bit is a real change.
 *
 * Fixes vs. previous version:
 *  - the "QUOTA feature is enabled, so ignore jquota_fmt" branch tested
 *    quota_feature AFTER the early 'if (quota_feature) return;' above, so
 *    it could never execute; the dead code is removed.
 *  - the "remove qf_name" path overwrote the old name pointer with NULL
 *    without freeing it; free the old name before replacing it.
 */
static void f2fs_apply_quota_options(struct fs_context *fc,
				     struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	bool quota_feature = f2fs_sb_has_quota_ino(sbi);
	char *qname;
	int i;

	/*
	 * With the quota_ino feature, quota files are internal and any
	 * qf_name options were already dropped during the consistency
	 * check, so there is nothing to apply.
	 */
	if (quota_feature)
		return;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (!(ctx->qname_mask & (1 << i)))
			continue;

		qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
		if (qname) {
			/*
			 * Duplicate the name: the fs_context keeps and frees
			 * its own copy, while sbi owns this one.
			 */
			qname = kstrdup(F2FS_CTX_INFO(ctx).s_qf_names[i],
					GFP_KERNEL | __GFP_NOFAIL);
			set_opt(sbi, QUOTA);
		}
		/*
		 * The consistency check only leaves a masked slot with an old
		 * name when the new name is NULL (removal), so freeing the
		 * old pointer here cannot discard a name still in use.
		 */
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = qname;
	}

	if (ctx->spec_mask & F2FS_SPEC_jqfmt)
		F2FS_OPTION(sbi).s_jquota_fmt = F2FS_CTX_INFO(ctx).s_jquota_fmt;
#endif
}
1588 
1589 static void f2fs_apply_test_dummy_encryption(struct fs_context *fc,
1590 					     struct super_block *sb)
1591 {
1592 	struct f2fs_fs_context *ctx = fc->fs_private;
1593 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1594 
1595 	if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy) ||
1596 		/* if already set, it was already verified to be the same */
1597 		fscrypt_is_dummy_policy_set(&F2FS_OPTION(sbi).dummy_enc_policy))
1598 		return;
1599 	swap(F2FS_OPTION(sbi).dummy_enc_policy, F2FS_CTX_INFO(ctx).dummy_enc_policy);
1600 	f2fs_warn(sbi, "Test dummy encryption mode enabled");
1601 }
1602 
#ifdef CONFIG_F2FS_FS_COMPRESSION
/*
 * Append every non-empty extension from @src (@src_cnt entries) to @dst,
 * which currently holds @dst_cnt entries, and return the new count.
 * Entries blanked out ([0] == '\0') by f2fs_check_compression() because
 * they duplicate an existing extension are skipped.  The check step has
 * already verified the merged list fits in COMPRESS_EXT_NUM.
 */
static int f2fs_merge_compress_extensions(
		unsigned char (*dst)[F2FS_EXTENSION_LEN], int dst_cnt,
		unsigned char (*src)[F2FS_EXTENSION_LEN], int src_cnt)
{
	int i;

	for (i = 0; i < src_cnt; i++) {
		if (strlen(src[i]) == 0)
			continue;
		strscpy(dst[dst_cnt], src[i]);
		dst_cnt++;
	}
	return dst_cnt;
}
#endif

/*
 * Copy every compression option that was specified in the fs_context
 * (per ctx->spec_mask) into the live superblock options; extension
 * lists are merged rather than replaced.
 */
static void f2fs_apply_compression(struct fs_context *fc,
				   struct super_block *sb)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (ctx->spec_mask & F2FS_SPEC_compress_level)
		F2FS_OPTION(sbi).compress_level =
					F2FS_CTX_INFO(ctx).compress_level;
	if (ctx->spec_mask & F2FS_SPEC_compress_algorithm)
		F2FS_OPTION(sbi).compress_algorithm =
					F2FS_CTX_INFO(ctx).compress_algorithm;
	if (ctx->spec_mask & F2FS_SPEC_compress_log_size)
		F2FS_OPTION(sbi).compress_log_size =
					F2FS_CTX_INFO(ctx).compress_log_size;
	if (ctx->spec_mask & F2FS_SPEC_compress_chksum)
		F2FS_OPTION(sbi).compress_chksum =
					F2FS_CTX_INFO(ctx).compress_chksum;
	if (ctx->spec_mask & F2FS_SPEC_compress_mode)
		F2FS_OPTION(sbi).compress_mode =
					F2FS_CTX_INFO(ctx).compress_mode;
	if (ctx->spec_mask & F2FS_SPEC_compress_extension)
		F2FS_OPTION(sbi).compress_ext_cnt =
			f2fs_merge_compress_extensions(
				F2FS_OPTION(sbi).extensions,
				F2FS_OPTION(sbi).compress_ext_cnt,
				F2FS_CTX_INFO(ctx).extensions,
				F2FS_CTX_INFO(ctx).compress_ext_cnt);
	if (ctx->spec_mask & F2FS_SPEC_nocompress_extension)
		F2FS_OPTION(sbi).nocompress_ext_cnt =
			f2fs_merge_compress_extensions(
				F2FS_OPTION(sbi).noextensions,
				F2FS_OPTION(sbi).nocompress_ext_cnt,
				F2FS_CTX_INFO(ctx).noextensions,
				F2FS_CTX_INFO(ctx).nocompress_ext_cnt);
#endif
}
1656 
/*
 * Apply the validated fs_context options to the live superblock options.
 * ctx->opt_mask selects which flag bits this mount touched; ctx->spec_mask
 * selects which valued options were specified.  NOTE(review): assumes the
 * f2fs_check_* helpers ran successfully first — confirm at call sites.
 */
static void f2fs_apply_options(struct fs_context *fc, struct super_block *sb)
{
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	/* Replace only the flag bits that this mount explicitly set/cleared. */
	F2FS_OPTION(sbi).opt &= ~ctx->opt_mask;
	F2FS_OPTION(sbi).opt |= F2FS_CTX_INFO(ctx).opt;

	if (ctx->spec_mask & F2FS_SPEC_background_gc)
		F2FS_OPTION(sbi).bggc_mode = F2FS_CTX_INFO(ctx).bggc_mode;
	if (ctx->spec_mask & F2FS_SPEC_inline_xattr_size)
		F2FS_OPTION(sbi).inline_xattr_size =
					F2FS_CTX_INFO(ctx).inline_xattr_size;
	if (ctx->spec_mask & F2FS_SPEC_active_logs)
		F2FS_OPTION(sbi).active_logs = F2FS_CTX_INFO(ctx).active_logs;
	if (ctx->spec_mask & F2FS_SPEC_reserve_root)
		F2FS_OPTION(sbi).root_reserved_blocks =
					F2FS_CTX_INFO(ctx).root_reserved_blocks;
	if (ctx->spec_mask & F2FS_SPEC_reserve_node)
		F2FS_OPTION(sbi).root_reserved_nodes =
					F2FS_CTX_INFO(ctx).root_reserved_nodes;
	if (ctx->spec_mask & F2FS_SPEC_resgid)
		F2FS_OPTION(sbi).s_resgid = F2FS_CTX_INFO(ctx).s_resgid;
	if (ctx->spec_mask & F2FS_SPEC_resuid)
		F2FS_OPTION(sbi).s_resuid = F2FS_CTX_INFO(ctx).s_resuid;
	if (ctx->spec_mask & F2FS_SPEC_mode)
		F2FS_OPTION(sbi).fs_mode = F2FS_CTX_INFO(ctx).fs_mode;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	/* Fault rate and fault type are applied independently of each other. */
	if (ctx->spec_mask & F2FS_SPEC_fault_injection)
		(void)f2fs_build_fault_attr(sbi,
		F2FS_CTX_INFO(ctx).fault_info.inject_rate, 0, FAULT_RATE);
	if (ctx->spec_mask & F2FS_SPEC_fault_type)
		(void)f2fs_build_fault_attr(sbi, 0,
			F2FS_CTX_INFO(ctx).fault_info.inject_type, FAULT_TYPE);
#endif
	if (ctx->spec_mask & F2FS_SPEC_alloc_mode)
		F2FS_OPTION(sbi).alloc_mode = F2FS_CTX_INFO(ctx).alloc_mode;
	if (ctx->spec_mask & F2FS_SPEC_fsync_mode)
		F2FS_OPTION(sbi).fsync_mode = F2FS_CTX_INFO(ctx).fsync_mode;
	if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap)
		F2FS_OPTION(sbi).unusable_cap = F2FS_CTX_INFO(ctx).unusable_cap;
	if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap_perc)
		F2FS_OPTION(sbi).unusable_cap_perc =
					F2FS_CTX_INFO(ctx).unusable_cap_perc;
	if (ctx->spec_mask & F2FS_SPEC_discard_unit)
		F2FS_OPTION(sbi).discard_unit = F2FS_CTX_INFO(ctx).discard_unit;
	if (ctx->spec_mask & F2FS_SPEC_memory_mode)
		F2FS_OPTION(sbi).memory_mode = F2FS_CTX_INFO(ctx).memory_mode;
	if (ctx->spec_mask & F2FS_SPEC_errors)
		F2FS_OPTION(sbi).errors = F2FS_CTX_INFO(ctx).errors;
	if (ctx->spec_mask & F2FS_SPEC_lookup_mode)
		F2FS_OPTION(sbi).lookup_mode = F2FS_CTX_INFO(ctx).lookup_mode;

	/* Compound option groups are handled by their own apply helpers. */
	f2fs_apply_compression(fc, sb);
	f2fs_apply_test_dummy_encryption(fc, sb);
	f2fs_apply_quota_options(fc, sb);
}
1714 
1715 static int f2fs_sanity_check_options(struct f2fs_sb_info *sbi, bool remount)
1716 {
1717 	if (f2fs_sb_has_device_alias(sbi) &&
1718 	    !test_opt(sbi, READ_EXTENT_CACHE)) {
1719 		f2fs_err(sbi, "device aliasing requires extent cache");
1720 		return -EINVAL;
1721 	}
1722 
1723 	if (!remount)
1724 		return 0;
1725 
1726 #ifdef CONFIG_BLK_DEV_ZONED
1727 	if (f2fs_sb_has_blkzoned(sbi) &&
1728 	    sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
1729 		f2fs_err(sbi,
1730 			"zoned: max open zones %u is too small, need at least %u open zones",
1731 				 sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
1732 		return -EINVAL;
1733 	}
1734 #endif
1735 	if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
1736 		f2fs_warn(sbi, "LFS is not compatible with IPU");
1737 		return -EINVAL;
1738 	}
1739 	return 0;
1740 }
1741 
1742 static struct inode *f2fs_alloc_inode(struct super_block *sb)
1743 {
1744 	struct f2fs_inode_info *fi;
1745 
1746 	if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))
1747 		return NULL;
1748 
1749 	fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
1750 	if (!fi)
1751 		return NULL;
1752 
1753 	init_once((void *) fi);
1754 
1755 	/* Initialize f2fs-specific inode info */
1756 	atomic_set(&fi->dirty_pages, 0);
1757 	atomic_set(&fi->i_compr_blocks, 0);
1758 	atomic_set(&fi->open_count, 0);
1759 	atomic_set(&fi->writeback, 0);
1760 	init_f2fs_rwsem(&fi->i_sem);
1761 	spin_lock_init(&fi->i_size_lock);
1762 	INIT_LIST_HEAD(&fi->dirty_list);
1763 	INIT_LIST_HEAD(&fi->gdirty_list);
1764 	INIT_LIST_HEAD(&fi->gdonate_list);
1765 	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
1766 	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
1767 	init_f2fs_rwsem(&fi->i_xattr_sem);
1768 
1769 	/* Will be used by directory only */
1770 	fi->i_dir_level = F2FS_SB(sb)->dir_level;
1771 
1772 	return &fi->vfs_inode;
1773 }
1774 
/*
 * f2fs_drop_inode - decide whether the VFS should drop @inode now.
 *
 * Returns 1 to drop the inode immediately, 0 to keep it cached.
 * Runs with inode->i_lock held; the writeback-avoidance path below
 * temporarily releases and re-acquires that lock.
 */
static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode_state_read(inode) & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			__iget(inode);
			spin_unlock(&inode->i_lock);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			/* flush any in-flight data writes before truncating */
			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			/* drop the reference taken by __iget() above */
			atomic_dec(&inode->i_count);
		}
		/* keep the inode cached; writeback is still in progress */
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = inode_generic_drop(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
1832 
/*
 * Mark @inode's f2fs metadata dirty.  Returns 1 if it was already dirty,
 * 0 if this call performed the clean->dirty transition.  When @sync is
 * true the inode is also queued on the DIRTY_META list so checkpoint can
 * find and write it back.
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	/* list_empty() guards against double-queueing the same inode. */
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	/* if atomic write is not committed, set inode w/ atomic dirty */
	if (!ret && f2fs_is_atomic_file(inode) &&
			!is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
		set_inode_flag(inode, FI_ATOMIC_DIRTIED);

	return ret;
}
1859 
/*
 * Clear @inode's dirty-metadata state after writeback: remove it from
 * the DIRTY_META list and fix up the counters.  No-op when the inode
 * was not marked dirty.
 */
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	/* Only inodes queued via f2fs_inode_dirtied(sync=true) are listed. */
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
1878 
1879 /*
1880  * f2fs_dirty_inode() is called from __mark_inode_dirty()
1881  *
1882  * We should call set_dirty_inode to write the dirty inode through write_inode.
1883  */
1884 static void f2fs_dirty_inode(struct inode *inode, int flags)
1885 {
1886 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1887 
1888 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1889 			inode->i_ino == F2FS_META_INO(sbi))
1890 		return;
1891 
1892 	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
1893 		clear_inode_flag(inode, FI_AUTO_RECOVER);
1894 
1895 	f2fs_inode_dirtied(inode, false);
1896 }
1897 
/*
 * Release the in-memory inode: let fscrypt drop its per-inode state,
 * then return the f2fs_inode_info object to its slab cache.
 */
static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
1903 
/* Destroy the per-CPU counters embedded in @sbi. */
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->total_valid_inode_count);
	percpu_counter_destroy(&sbi->rf_node_block_count);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
}
1910 
1911 static void destroy_device_list(struct f2fs_sb_info *sbi)
1912 {
1913 	int i;
1914 
1915 	for (i = 0; i < sbi->s_ndevs; i++) {
1916 		if (i > 0)
1917 			bdev_fput(FDEV(i).bdev_file);
1918 #ifdef CONFIG_BLK_DEV_ZONED
1919 		kvfree(FDEV(i).blkz_seq);
1920 #endif
1921 	}
1922 	kvfree(sbi->devs);
1923 }
1924 
/*
 * ->put_super: final teardown of a mounted f2fs instance.  Writes a
 * last umount checkpoint when the fs is dirty, stops every background
 * thread, drops the internal node/meta inodes and frees all
 * per-superblock state.  The ordering below is significant.
 */
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	int err = 0;
	bool done;

	/* unregister procfs/sysfs entries in advance to avoid race case */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * flush all issued checkpoints and stop checkpoint issue thread.
	 * after then, all checkpoints should be done by each process context.
	 */
	f2fs_stop_ckpt_thread(sbi);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, the previous checkpoint was not done by umount, it needs to do
	 * clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		err = f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	done = f2fs_issue_discard_timeout(sbi);
	/* all discards completed in time: record that with CP_TRIMMED */
	if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		err = f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip do checkpoint, we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/* checkpoint failed or errored out: drop cached node/meta pages */
	if (err || f2fs_cp_error(sbi)) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/* no fsync node entries may survive umount */
	f2fs_bug_on(sbi, sbi->fsync_node_num);

	f2fs_destroy_compress_inode(sbi);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/* Should check the page counts after dropping all node/meta pages */
	for (i = 0; i < NR_COUNT_TYPE; i++) {
		if (!get_pages(sbi, i))
			continue;
		f2fs_err(sbi, "detect filesystem reference count leak during "
			"umount, type: %d, count: %lld", i, get_pages(sbi, i));
		f2fs_bug_on(sbi, 1);
	}

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	/* flush s_error_work before sbi destroy */
	flush_work(&sbi->s_error_work);

	f2fs_destroy_post_read_wq(sbi);

	kvfree(sbi->ckpt);

	kfree(sbi->raw_super);

	f2fs_destroy_page_array_cache(sbi);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	f2fs_destroy_iostat(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
}
2041 
2042 int f2fs_sync_fs(struct super_block *sb, int sync)
2043 {
2044 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2045 	int err = 0;
2046 
2047 	if (unlikely(f2fs_cp_error(sbi)))
2048 		return 0;
2049 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2050 		return 0;
2051 
2052 	trace_f2fs_sync_fs(sb, sync);
2053 
2054 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2055 		return -EAGAIN;
2056 
2057 	if (sync) {
2058 		stat_inc_cp_call_count(sbi, TOTAL_CALL);
2059 		err = f2fs_issue_checkpoint(sbi);
2060 	}
2061 
2062 	return err;
2063 }
2064 
/*
 * ->freeze_fs: the VFS has already run sync_filesystem(); flush any
 * queued checkpoints, stop issuing new ones, and mark the fs frozen.
 * Read-only mounts trivially succeed.
 */
static int f2fs_freeze(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY))
		return -EINVAL;

	/*
	 * NOTE(review): marking current as umount_lock_holder around the
	 * flush presumably lets the checkpoint path recognize this task
	 * and skip umount serialization — confirm against
	 * f2fs_flush_ckpt_thread().
	 */
	sbi->umount_lock_holder = current;

	/* Let's flush checkpoints and stop the thread. */
	f2fs_flush_ckpt_thread(sbi);

	sbi->umount_lock_holder = NULL;

	/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
	set_sbi_flag(sbi, SBI_IS_FREEZING);
	return 0;
}
2091 
2092 static int f2fs_unfreeze(struct super_block *sb)
2093 {
2094 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2095 
2096 	/*
2097 	 * It will update discard_max_bytes of mounted lvm device to zero
2098 	 * after creating snapshot on this lvm device, let's drop all
2099 	 * remained discards.
2100 	 * We don't need to disable real-time discard because discard_max_bytes
2101 	 * will recover after removal of snapshot.
2102 	 */
2103 	if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
2104 		f2fs_issue_discard_timeout(sbi);
2105 
2106 	clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
2107 	return 0;
2108 }
2109 
2110 #ifdef CONFIG_QUOTA
2111 static int f2fs_statfs_project(struct super_block *sb,
2112 				kprojid_t projid, struct kstatfs *buf)
2113 {
2114 	struct kqid qid;
2115 	struct dquot *dquot;
2116 	u64 limit;
2117 	u64 curblock;
2118 
2119 	qid = make_kqid_projid(projid);
2120 	dquot = dqget(sb, qid);
2121 	if (IS_ERR(dquot))
2122 		return PTR_ERR(dquot);
2123 	spin_lock(&dquot->dq_dqb_lock);
2124 
2125 	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
2126 					dquot->dq_dqb.dqb_bhardlimit);
2127 	limit >>= sb->s_blocksize_bits;
2128 
2129 	if (limit) {
2130 		uint64_t remaining = 0;
2131 
2132 		curblock = (dquot->dq_dqb.dqb_curspace +
2133 			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
2134 		if (limit > curblock)
2135 			remaining = limit - curblock;
2136 
2137 		buf->f_blocks = min(buf->f_blocks, limit);
2138 		buf->f_bfree = min(buf->f_bfree, remaining);
2139 		buf->f_bavail = min(buf->f_bavail, remaining);
2140 	}
2141 
2142 	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
2143 					dquot->dq_dqb.dqb_ihardlimit);
2144 
2145 	if (limit) {
2146 		uint64_t remaining = 0;
2147 
2148 		if (limit > dquot->dq_dqb.dqb_curinodes)
2149 			remaining = limit - dquot->dq_dqb.dqb_curinodes;
2150 
2151 		buf->f_files = min(buf->f_files, limit);
2152 		buf->f_ffree = min(buf->f_ffree, remaining);
2153 	}
2154 
2155 	spin_unlock(&dquot->dq_dqb_lock);
2156 	dqput(dquot);
2157 	return 0;
2158 }
2159 #endif
2160 
/*
 * ->statfs: report capacity and usage.  Block/node counters are
 * sampled under stat_lock, then clamped by the root reservation and,
 * when enabled, by per-project quota limits.
 */
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;
	unsigned int total_valid_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	/* usable space starts at segment0_blkaddr */
	buf->f_blocks = total_count - start_count;

	spin_lock(&sbi->stat_lock);
	/*
	 * NOTE(review): carve_out appears to hide currently-reserved
	 * blocks from the reported total — confirm the intended df
	 * semantics.
	 */
	if (sbi->carve_out)
		buf->f_blocks -= sbi->current_reserved_blocks;
	user_block_count = sbi->user_block_count;
	total_valid_node_count = valid_node_count(sbi);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	/* blocks parked by checkpoint=disable are not free either */
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	/* unprivileged users cannot consume the root-reserved blocks */
	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	/* inode headroom is bounded by node IDs or user blocks,
	 * whichever is scarcer */
	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - total_valid_node_count,
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid    = u64_to_fsid(id);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf);
	}
#endif
	return 0;
}
2218 
/* Emit the journalled-quota mount options (jqfmt and quota file names). */
static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	static const char * const qf_opt_name[MAXQUOTAS] = {
		[USRQUOTA] = "usrjquota",
		[GRPQUOTA] = "grpjquota",
		[PRJQUOTA] = "prjjquota",
	};
	int fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	int type;

	/* journalled quota format, when one was selected */
	if (fmt) {
		const char *fmtname;

		if (fmt == QFMT_VFS_OLD)
			fmtname = "vfsold";
		else if (fmt == QFMT_VFS_V0)
			fmtname = "vfsv0";
		else if (fmt == QFMT_VFS_V1)
			fmtname = "vfsv1";
		else
			fmtname = "";
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	/* quota file names, in usr/grp/prj order as before */
	for (type = 0; type < MAXQUOTAS; type++)
		if (F2FS_OPTION(sbi).s_qf_names[type])
			seq_show_option(seq, qf_opt_name[type],
					F2FS_OPTION(sbi).s_qf_names[type]);
#endif
}
2255 
2256 #ifdef CONFIG_F2FS_FS_COMPRESSION
2257 static inline void f2fs_show_compress_options(struct seq_file *seq,
2258 							struct super_block *sb)
2259 {
2260 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2261 	char *algtype = "";
2262 	int i;
2263 
2264 	if (!f2fs_sb_has_compression(sbi))
2265 		return;
2266 
2267 	switch (F2FS_OPTION(sbi).compress_algorithm) {
2268 	case COMPRESS_LZO:
2269 		algtype = "lzo";
2270 		break;
2271 	case COMPRESS_LZ4:
2272 		algtype = "lz4";
2273 		break;
2274 	case COMPRESS_ZSTD:
2275 		algtype = "zstd";
2276 		break;
2277 	case COMPRESS_LZORLE:
2278 		algtype = "lzo-rle";
2279 		break;
2280 	}
2281 	seq_printf(seq, ",compress_algorithm=%s", algtype);
2282 
2283 	if (F2FS_OPTION(sbi).compress_level)
2284 		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
2285 
2286 	seq_printf(seq, ",compress_log_size=%u",
2287 			F2FS_OPTION(sbi).compress_log_size);
2288 
2289 	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
2290 		seq_printf(seq, ",compress_extension=%s",
2291 			F2FS_OPTION(sbi).extensions[i]);
2292 	}
2293 
2294 	for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
2295 		seq_printf(seq, ",nocompress_extension=%s",
2296 			F2FS_OPTION(sbi).noextensions[i]);
2297 	}
2298 
2299 	if (F2FS_OPTION(sbi).compress_chksum)
2300 		seq_puts(seq, ",compress_chksum");
2301 
2302 	if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
2303 		seq_printf(seq, ",compress_mode=%s", "fs");
2304 	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
2305 		seq_printf(seq, ",compress_mode=%s", "user");
2306 
2307 	if (test_opt(sbi, COMPRESS_CACHE))
2308 		seq_puts(seq, ",compress_cache");
2309 }
2310 #endif
2311 
/*
 * ->show_options: emit the active mount options into @seq.  The print
 * order is part of the user-visible /proc/mounts output, so keep it
 * stable.
 */
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, GC_MERGE))
		seq_puts(seq, ",gc_merge");
	else
		seq_puts(seq, ",nogc_merge");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	/* discard_unit is only meaningful while discard is enabled */
	if (test_opt(sbi, DISCARD)) {
		seq_puts(seq, ",discard");
		if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
			seq_printf(seq, ",discard_unit=%s", "block");
		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
			seq_printf(seq, ",discard_unit=%s", "segment");
		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
			seq_printf(seq, ",discard_unit=%s", "section");
	} else {
		seq_puts(seq, ",nodiscard");
	}
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	else
		seq_puts(seq, ",noflush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	else
		seq_puts(seq, ",barrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, READ_EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, AGE_EXTENT_CACHE))
		seq_puts(seq, ",age_extent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
		seq_puts(seq, "adaptive");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
		seq_puts(seq, "lfs");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
		seq_puts(seq, "fragment:segment");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
		seq_puts(seq, "fragment:block");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT) || test_opt(sbi, RESERVE_NODE))
		seq_printf(seq, ",reserve_root=%u,reserve_node=%u,resuid=%u,"
				"resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				F2FS_OPTION(sbi).root_reserved_nodes,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (test_opt(sbi, MERGE_CHECKPOINT))
		seq_puts(seq, ",checkpoint_merge");
	else
		seq_puts(seq, ",nocheckpoint_merge");
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");

#ifdef CONFIG_F2FS_FS_COMPRESSION
	f2fs_show_compress_options(seq, sbi->sb);
#endif

	if (test_opt(sbi, ATGC))
		seq_puts(seq, ",atgc");

	if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
		seq_printf(seq, ",memory=%s", "normal");
	else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
		seq_printf(seq, ",memory=%s", "low");

	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
		seq_printf(seq, ",errors=%s", "remount-ro");
	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE)
		seq_printf(seq, ",errors=%s", "continue");
	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
		seq_printf(seq, ",errors=%s", "panic");

	if (test_opt(sbi, NAT_BITS))
		seq_puts(seq, ",nat_bits");

	if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_PERF)
		seq_show_option(seq, "lookup_mode", "perf");
	else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_COMPAT)
		seq_show_option(seq, "lookup_mode", "compat");
	else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_AUTO)
		seq_show_option(seq, "lookup_mode", "auto");

	return 0;
}
2485 
/*
 * Reset mount options to their defaults.  When @remount is true, the
 * settings derived from device probing at first mount (discard
 * enablement and discard unit) are left untouched.
 */
static void default_options(struct f2fs_sb_info *sbi, bool remount)
{
	/* init some FS parameters */
	if (!remount) {
		set_opt(sbi, READ_EXTENT_CACHE);
		clear_opt(sbi, DISABLE_CHECKPOINT);

		if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
			set_opt(sbi, DISCARD);

		/* zoned devices default to section-granularity discard */
		if (f2fs_sb_has_blkzoned(sbi))
			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
		else
			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
	}

	if (f2fs_sb_has_readonly(sbi))
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
	else
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;

	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	/* small volumes default to reuse-style allocation */
	if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <=
							SMALL_VOLUME_SEGMENTS)
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
	else
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
	if (f2fs_sb_has_compression(sbi)) {
		F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
		F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
		F2FS_OPTION(sbi).compress_ext_cnt = 0;
		F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
	}
	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
	F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;

	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, MERGE_CHECKPOINT);
	set_opt(sbi, LAZYTIME);
	F2FS_OPTION(sbi).unusable_cap = 0;
	if (!f2fs_is_readonly(sbi))
		set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_has_blkzoned(sbi))
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
	else
		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	/* clear any fault-injection settings */
	f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL);

	F2FS_OPTION(sbi).lookup_mode = LOOKUP_PERF;
}
2550 
#ifdef CONFIG_QUOTA
/* forward declaration: defined later, needed by the remount path below */
static int f2fs_enable_quotas(struct super_block *sb);
#endif
2554 
/*
 * Enter checkpoint=disable mode: run foreground GC until the unusable
 * space fits the configured cap, then write a CP_PAUSE checkpoint and
 * set SBI_CP_DISABLED.  Returns 0 on success, -EINVAL on a read-only
 * fs, -EAGAIN when GC could not free enough space in time, or a
 * GC/checkpoint error.
 */
static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	unsigned int gc_mode = sbi->gc_mode;
	int err = 0;
	int ret;
	block_t unusable;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	/* check if we need more GC first */
	unusable = f2fs_get_unusable_blocks(sbi);
	if (!f2fs_disable_cp_again(sbi, unusable))
		goto skip_gc;

	f2fs_update_time(sbi, DISABLE_TIME);

	/* push GC as hard as possible for the duration of DISABLE_TIME */
	sbi->gc_mode = GC_URGENT_HIGH;

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		struct f2fs_gc_control gc_control = {
			.victim_segno = NULL_SEGNO,
			.init_gc_type = FG_GC,
			.should_migrate_blocks = false,
			.err_gc_skipped = true,
			.no_bg_gc = true,
			.nr_free_secs = 1 };

		f2fs_down_write(&sbi->gc_lock);
		stat_inc_gc_call_count(sbi, FOREGROUND);
		err = f2fs_gc(sbi, &gc_control);
		/* -ENODATA means there is nothing left to move: GC is done */
		if (err == -ENODATA) {
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	/* GC may still not have freed enough; let the caller retry */
	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
		err = -EAGAIN;
		goto restore_flag;
	}

skip_gc:
	f2fs_down_write(&sbi->gc_lock);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

out_unlock:
	f2fs_up_write(&sbi->gc_lock);
restore_flag:
	sbi->gc_mode = gc_mode;
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
	f2fs_info(sbi, "f2fs_disable_checkpoint() finish, err:%d", err);
	return err;
}
2632 
/*
 * Leave checkpoint=disable mode: write back all dirty data, move dirty
 * segments to prefree, clear SBI_CP_DISABLED and issue a checkpoint.
 * The duration of each phase is logged.  Returns the f2fs_sync_fs()
 * result.
 */
static int f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int nr_pages = get_pages(sbi, F2FS_DIRTY_DATA) / 16;
	long long start, writeback, lock, sync_inode, end;
	int ret;

	f2fs_info(sbi, "%s start, meta: %lld, node: %lld, data: %lld",
					__func__,
					get_pages(sbi, F2FS_DIRTY_META),
					get_pages(sbi, F2FS_DIRTY_NODES),
					get_pages(sbi, F2FS_DIRTY_DATA));

	f2fs_update_time(sbi, ENABLE_TIME);

	start = ktime_get();

	/* we should flush all the data to keep data consistency */
	while (get_pages(sbi, F2FS_DIRTY_DATA)) {
		writeback_inodes_sb_nr(sbi->sb, nr_pages, WB_REASON_SYNC);
		f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);

		/* bounded by ENABLE_TIME so this loop cannot spin forever */
		if (f2fs_time_over(sbi, ENABLE_TIME))
			break;
	}
	writeback = ktime_get();

	f2fs_down_write(&sbi->cp_enable_rwsem);

	lock = ktime_get();

	if (get_pages(sbi, F2FS_DIRTY_DATA))
		sync_inodes_sb(sbi->sb);

	if (unlikely(get_pages(sbi, F2FS_DIRTY_DATA)))
		f2fs_warn(sbi, "%s: has some unwritten data: %lld",
			__func__, get_pages(sbi, F2FS_DIRTY_DATA));

	sync_inode = ktime_get();

	f2fs_down_write(&sbi->gc_lock);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	f2fs_up_write(&sbi->gc_lock);

	f2fs_info(sbi, "%s sync_fs, meta: %lld, imeta: %lld, node: %lld, dents: %lld, qdata: %lld",
					__func__,
					get_pages(sbi, F2FS_DIRTY_META),
					get_pages(sbi, F2FS_DIRTY_IMETA),
					get_pages(sbi, F2FS_DIRTY_NODES),
					get_pages(sbi, F2FS_DIRTY_DENTS),
					get_pages(sbi, F2FS_DIRTY_QDATA));
	ret = f2fs_sync_fs(sbi->sb, 1);
	if (ret)
		f2fs_err(sbi, "%s sync_fs failed, ret: %d", __func__, ret);

	/* Let's ensure there's no pending checkpoint anymore */
	f2fs_flush_ckpt_thread(sbi);

	f2fs_up_write(&sbi->cp_enable_rwsem);

	end = ktime_get();

	f2fs_info(sbi, "%s end, writeback:%llu, "
				"lock:%llu, sync_inode:%llu, sync_fs:%llu",
				__func__,
				ktime_ms_delta(writeback, start),
				ktime_ms_delta(lock, writeback),
				ktime_ms_delta(sync_inode, lock),
				ktime_ms_delta(end, sync_inode));
	return ret;
}
2706 
2707 static int __f2fs_remount(struct fs_context *fc, struct super_block *sb)
2708 {
2709 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2710 	struct f2fs_mount_info org_mount_opt;
2711 	unsigned long old_sb_flags;
2712 	unsigned int flags = fc->sb_flags;
2713 	int err;
2714 	bool need_restart_gc = false, need_stop_gc = false;
2715 	bool need_restart_flush = false, need_stop_flush = false;
2716 	bool need_restart_discard = false, need_stop_discard = false;
2717 	bool need_enable_checkpoint = false, need_disable_checkpoint = false;
2718 	bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
2719 	bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
2720 	bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
2721 	bool no_atgc = !test_opt(sbi, ATGC);
2722 	bool no_discard = !test_opt(sbi, DISCARD);
2723 	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
2724 	bool block_unit_discard = f2fs_block_unit_discard(sbi);
2725 	bool no_nat_bits = !test_opt(sbi, NAT_BITS);
2726 #ifdef CONFIG_QUOTA
2727 	int i, j;
2728 #endif
2729 
2730 	/*
2731 	 * Save the old mount options in case we
2732 	 * need to restore them.
2733 	 */
2734 	org_mount_opt = sbi->mount_opt;
2735 	old_sb_flags = sb->s_flags;
2736 
2737 	sbi->umount_lock_holder = current;
2738 
2739 #ifdef CONFIG_QUOTA
2740 	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
2741 	for (i = 0; i < MAXQUOTAS; i++) {
2742 		if (F2FS_OPTION(sbi).s_qf_names[i]) {
2743 			org_mount_opt.s_qf_names[i] =
2744 				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
2745 				GFP_KERNEL);
2746 			if (!org_mount_opt.s_qf_names[i]) {
2747 				for (j = 0; j < i; j++)
2748 					kfree(org_mount_opt.s_qf_names[j]);
2749 				return -ENOMEM;
2750 			}
2751 		} else {
2752 			org_mount_opt.s_qf_names[i] = NULL;
2753 		}
2754 	}
2755 #endif
2756 
2757 	/* recover superblocks we couldn't write due to previous RO mount */
2758 	if (!(flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
2759 		err = f2fs_commit_super(sbi, false);
2760 		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
2761 			  err);
2762 		if (!err)
2763 			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2764 	}
2765 
2766 	default_options(sbi, true);
2767 
2768 	err = f2fs_check_opt_consistency(fc, sb);
2769 	if (err)
2770 		goto restore_opts;
2771 
2772 	f2fs_apply_options(fc, sb);
2773 
2774 	err = f2fs_sanity_check_options(sbi, true);
2775 	if (err)
2776 		goto restore_opts;
2777 
2778 	/* flush outstanding errors before changing fs state */
2779 	flush_work(&sbi->s_error_work);
2780 
2781 	/*
2782 	 * Previous and new state of filesystem is RO,
2783 	 * so skip checking GC and FLUSH_MERGE conditions.
2784 	 */
2785 	if (f2fs_readonly(sb) && (flags & SB_RDONLY))
2786 		goto skip;
2787 
2788 	if (f2fs_dev_is_readonly(sbi) && !(flags & SB_RDONLY)) {
2789 		err = -EROFS;
2790 		goto restore_opts;
2791 	}
2792 
2793 #ifdef CONFIG_QUOTA
2794 	if (!f2fs_readonly(sb) && (flags & SB_RDONLY)) {
2795 		err = dquot_suspend(sb, -1);
2796 		if (err < 0)
2797 			goto restore_opts;
2798 	} else if (f2fs_readonly(sb) && !(flags & SB_RDONLY)) {
2799 		/* dquot_resume needs RW */
2800 		sb->s_flags &= ~SB_RDONLY;
2801 		if (sb_any_quota_suspended(sb)) {
2802 			dquot_resume(sb, -1);
2803 		} else if (f2fs_sb_has_quota_ino(sbi)) {
2804 			err = f2fs_enable_quotas(sb);
2805 			if (err)
2806 				goto restore_opts;
2807 		}
2808 	}
2809 #endif
2810 	/* disallow enable atgc dynamically */
2811 	if (no_atgc == !!test_opt(sbi, ATGC)) {
2812 		err = -EINVAL;
2813 		f2fs_warn(sbi, "switch atgc option is not allowed");
2814 		goto restore_opts;
2815 	}
2816 
2817 	/* disallow enable/disable extent_cache dynamically */
2818 	if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
2819 		err = -EINVAL;
2820 		f2fs_warn(sbi, "switch extent_cache option is not allowed");
2821 		goto restore_opts;
2822 	}
2823 	/* disallow enable/disable age extent_cache dynamically */
2824 	if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
2825 		err = -EINVAL;
2826 		f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
2827 		goto restore_opts;
2828 	}
2829 
2830 	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
2831 		err = -EINVAL;
2832 		f2fs_warn(sbi, "switch compress_cache option is not allowed");
2833 		goto restore_opts;
2834 	}
2835 
2836 	if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
2837 		err = -EINVAL;
2838 		f2fs_warn(sbi, "switch discard_unit option is not allowed");
2839 		goto restore_opts;
2840 	}
2841 
2842 	if (no_nat_bits == !!test_opt(sbi, NAT_BITS)) {
2843 		err = -EINVAL;
2844 		f2fs_warn(sbi, "switch nat_bits option is not allowed");
2845 		goto restore_opts;
2846 	}
2847 
2848 	if ((flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
2849 		err = -EINVAL;
2850 		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
2851 		goto restore_opts;
2852 	}
2853 
2854 	/*
2855 	 * We stop the GC thread if FS is mounted as RO
2856 	 * or if background_gc = off is passed in mount
2857 	 * option. Also sync the filesystem.
2858 	 */
2859 	if ((flags & SB_RDONLY) ||
2860 			(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
2861 			!test_opt(sbi, GC_MERGE))) {
2862 		if (sbi->gc_thread) {
2863 			f2fs_stop_gc_thread(sbi);
2864 			need_restart_gc = true;
2865 		}
2866 	} else if (!sbi->gc_thread) {
2867 		err = f2fs_start_gc_thread(sbi);
2868 		if (err)
2869 			goto restore_opts;
2870 		need_stop_gc = true;
2871 	}
2872 
2873 	if (flags & SB_RDONLY) {
2874 		sync_inodes_sb(sb);
2875 
2876 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2877 		set_sbi_flag(sbi, SBI_IS_CLOSE);
2878 		f2fs_sync_fs(sb, 1);
2879 		clear_sbi_flag(sbi, SBI_IS_CLOSE);
2880 	}
2881 
2882 	/*
2883 	 * We stop issue flush thread if FS is mounted as RO
2884 	 * or if flush_merge is not passed in mount option.
2885 	 */
2886 	if ((flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
2887 		clear_opt(sbi, FLUSH_MERGE);
2888 		f2fs_destroy_flush_cmd_control(sbi, false);
2889 		need_restart_flush = true;
2890 	} else {
2891 		err = f2fs_create_flush_cmd_control(sbi);
2892 		if (err)
2893 			goto restore_gc;
2894 		need_stop_flush = true;
2895 	}
2896 
2897 	if (no_discard == !!test_opt(sbi, DISCARD)) {
2898 		if (test_opt(sbi, DISCARD)) {
2899 			err = f2fs_start_discard_thread(sbi);
2900 			if (err)
2901 				goto restore_flush;
2902 			need_stop_discard = true;
2903 		} else {
2904 			f2fs_stop_discard_thread(sbi);
2905 			f2fs_issue_discard_timeout(sbi);
2906 			need_restart_discard = true;
2907 		}
2908 	}
2909 
2910 	adjust_unusable_cap_perc(sbi);
2911 	if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
2912 		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2913 			err = f2fs_disable_checkpoint(sbi);
2914 			if (err)
2915 				goto restore_discard;
2916 			need_enable_checkpoint = true;
2917 		} else {
2918 			err = f2fs_enable_checkpoint(sbi);
2919 			if (err)
2920 				goto restore_discard;
2921 			need_disable_checkpoint = true;
2922 		}
2923 	}
2924 
2925 	/*
2926 	 * Place this routine at the end, since a new checkpoint would be
2927 	 * triggered while remount and we need to take care of it before
2928 	 * returning from remount.
2929 	 */
2930 	if ((flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
2931 			!test_opt(sbi, MERGE_CHECKPOINT)) {
2932 		f2fs_stop_ckpt_thread(sbi);
2933 	} else {
		/* Flush the previous checkpoint, if it exists. */
2935 		f2fs_flush_ckpt_thread(sbi);
2936 
2937 		err = f2fs_start_ckpt_thread(sbi);
2938 		if (err) {
2939 			f2fs_err(sbi,
2940 			    "Failed to start F2FS issue_checkpoint_thread (%d)",
2941 			    err);
2942 			goto restore_checkpoint;
2943 		}
2944 	}
2945 
2946 skip:
2947 #ifdef CONFIG_QUOTA
2948 	/* Release old quota file names */
2949 	for (i = 0; i < MAXQUOTAS; i++)
2950 		kfree(org_mount_opt.s_qf_names[i]);
2951 #endif
2952 	/* Update the POSIXACL Flag */
2953 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
2954 		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
2955 
2956 	limit_reserve_root(sbi);
2957 	fc->sb_flags = (flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
2958 
2959 	sbi->umount_lock_holder = NULL;
2960 	return 0;
2961 restore_checkpoint:
2962 	if (need_enable_checkpoint) {
2963 		if (f2fs_enable_checkpoint(sbi))
2964 			f2fs_warn(sbi, "checkpoint has not been enabled");
2965 	} else if (need_disable_checkpoint) {
2966 		if (f2fs_disable_checkpoint(sbi))
2967 			f2fs_warn(sbi, "checkpoint has not been disabled");
2968 	}
2969 restore_discard:
2970 	if (need_restart_discard) {
2971 		if (f2fs_start_discard_thread(sbi))
2972 			f2fs_warn(sbi, "discard has been stopped");
2973 	} else if (need_stop_discard) {
2974 		f2fs_stop_discard_thread(sbi);
2975 	}
2976 restore_flush:
2977 	if (need_restart_flush) {
2978 		if (f2fs_create_flush_cmd_control(sbi))
2979 			f2fs_warn(sbi, "background flush thread has stopped");
2980 	} else if (need_stop_flush) {
2981 		clear_opt(sbi, FLUSH_MERGE);
2982 		f2fs_destroy_flush_cmd_control(sbi, false);
2983 	}
2984 restore_gc:
2985 	if (need_restart_gc) {
2986 		if (f2fs_start_gc_thread(sbi))
2987 			f2fs_warn(sbi, "background gc thread has stopped");
2988 	} else if (need_stop_gc) {
2989 		f2fs_stop_gc_thread(sbi);
2990 	}
2991 restore_opts:
2992 #ifdef CONFIG_QUOTA
2993 	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
2994 	for (i = 0; i < MAXQUOTAS; i++) {
2995 		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
2996 		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
2997 	}
2998 #endif
2999 	sbi->mount_opt = org_mount_opt;
3000 	sb->s_flags = old_sb_flags;
3001 
3002 	sbi->umount_lock_holder = NULL;
3003 	return err;
3004 }
3005 
3006 static void f2fs_shutdown(struct super_block *sb)
3007 {
3008 	f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
3009 }
3010 
3011 #ifdef CONFIG_QUOTA
3012 static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
3013 {
3014 	/* need to recovery orphan */
3015 	if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
3016 		return true;
3017 	/* need to recovery data */
3018 	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
3019 		return false;
3020 	if (test_opt(sbi, NORECOVERY))
3021 		return false;
3022 	return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG);
3023 }
3024 
/*
 * Prepare quotas for recovery: if recovery is needed and the device is
 * writable, temporarily drop SB_RDONLY (remembered via SBI_IS_WRITABLE)
 * and turn on quota files.  Returns true when quotas were enabled here
 * and must be turned off again by f2fs_recover_quota_end().
 */
static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi)
{
	bool readonly = f2fs_readonly(sbi->sb);

	if (!f2fs_need_recovery(sbi))
		return false;

	/* it doesn't need to check f2fs_sb_has_readonly() */
	if (f2fs_hw_is_readonly(sbi))
		return false;

	if (readonly) {
		/* allow recovery writes; f2fs_recover_quota_end() restores this */
		sbi->sb->s_flags &= ~SB_RDONLY;
		set_sbi_flag(sbi, SBI_IS_WRITABLE);
	}

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	return f2fs_enable_quota_files(sbi, readonly);
}
3047 
3048 static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi,
3049 						bool quota_enabled)
3050 {
3051 	if (quota_enabled)
3052 		f2fs_quota_off_umount(sbi->sb);
3053 
3054 	if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) {
3055 		clear_sbi_flag(sbi, SBI_IS_WRITABLE);
3056 		sbi->sb->s_flags |= SB_RDONLY;
3057 	}
3058 }
3059 
/*
 * Read data from quotafile (->quota_read superblock operation).
 *
 * Copies @len bytes at offset @off from the quota inode's page cache
 * into @data, reading folios in as needed.  Returns the number of bytes
 * copied (clamped to i_size) or a negative errno on read failure.
 */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;

	/* never read past EOF */
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		struct folio *folio;
		size_t offset;

repeat:
		folio = mapping_read_folio_gfp(mapping, off >> PAGE_SHIFT,
				GFP_NOFS);
		if (IS_ERR(folio)) {
			/* transient allocation failure: wait and retry */
			if (PTR_ERR(folio) == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(folio);
		}
		offset = offset_in_folio(folio, off);
		tocopy = min(folio_size(folio) - offset, toread);

		folio_lock(folio);

		/* folio was truncated/invalidated underneath us: retry */
		if (unlikely(folio->mapping != mapping)) {
			f2fs_folio_put(folio, true);
			goto repeat;
		}

		/*
		 * should never happen, just leave f2fs_bug_on() here to catch
		 * any potential bug.
		 */
		f2fs_bug_on(F2FS_SB(sb), !folio_test_uptodate(folio));

		memcpy_from_folio(data, folio, offset, tocopy);
		f2fs_folio_put(folio, true);

		toread -= tocopy;
		data += tocopy;
		off += tocopy;
	}
	return len;
}
3116 
/*
 * Write to quotafile (->quota_write superblock operation).
 *
 * Writes @len bytes from @data at offset @off through the quota inode's
 * ->write_begin/->write_end address-space hooks, one block at a time.
 * Returns the number of bytes written; if nothing was written, returns
 * the last error from write_begin (0 or negative).
 */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct folio *folio;
	void *fsdata = NULL;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		/* do not cross a block boundary in a single copy */
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy,
							&folio, &fsdata);
		if (unlikely(err)) {
			/* transient allocation failure: wait and retry */
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						folio, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	/* nothing written at all: report the error instead of a length */
	if (len == towrite)
		return err;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
3163 
3164 int f2fs_dquot_initialize(struct inode *inode)
3165 {
3166 	if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
3167 		return -ESRCH;
3168 
3169 	return dquot_initialize(inode);
3170 }
3171 
/* ->get_dquots: hand the VFS quota core the inode's dquot pointer array */
static struct dquot __rcu **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}
3176 
/* ->get_reserved_space: per-inode counter of quota-reserved blocks */
static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}
3181 
3182 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
3183 {
3184 	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
3185 		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
3186 		return 0;
3187 	}
3188 
3189 	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
3190 					F2FS_OPTION(sbi).s_jquota_fmt, type);
3191 }
3192 
3193 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
3194 {
3195 	int enabled = 0;
3196 	int i, err;
3197 
3198 	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
3199 		err = f2fs_enable_quotas(sbi->sb);
3200 		if (err) {
3201 			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
3202 			return 0;
3203 		}
3204 		return 1;
3205 	}
3206 
3207 	for (i = 0; i < MAXQUOTAS; i++) {
3208 		if (F2FS_OPTION(sbi).s_qf_names[i]) {
3209 			err = f2fs_quota_on_mount(sbi, i);
3210 			if (!err) {
3211 				enabled = 1;
3212 				continue;
3213 			}
3214 			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
3215 				 err, i);
3216 		}
3217 	}
3218 	return enabled;
3219 }
3220 
/*
 * Load one quota type from its reserved quota inode (quota_ino feature).
 *
 * Looks up the quota inode recorded in the superblock, marks it
 * S_NOQUOTA plus the default quota inode flags, then hands it to
 * dquot_load_quota_inode().  Returns 0 or a negative errno.
 */
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	unsigned long qf_flag = F2FS_QUOTA_DEFAULT_FL;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	/* a zero ino means this quota type has no sysfile */
	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	inode_lock(qf_inode);
	qf_inode->i_flags |= S_NOQUOTA;

	if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) {
		F2FS_I(qf_inode)->i_flags |= qf_flag;
		f2fs_set_inode_flags(qf_inode);
	}
	inode_unlock(qf_inode);

	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
3255 
/*
 * Enable usage tracking for every quota inode present in the superblock;
 * limits enforcement is enabled only for the types requested via mount
 * options.  On failure, already-enabled types are rolled back and the
 * filesystem is flagged for quota repair.
 */
static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	/* per-type "enforce limits" request from the mount options */
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				/* roll back the types enabled so far */
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}
3293 
/*
 * Write back one quota type's dquots and its file's dirty pages.
 * For journalled quota, waiting for writeback and dropping the page
 * cache is skipped.  Flags SBI_QUOTA_NEED_REPAIR on any failure.
 */
static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
{
	struct quota_info *dqopt = sb_dqopt(sbi->sb);
	struct address_space *mapping = dqopt->files[type]->i_mapping;
	int ret = 0;

	ret = dquot_writeback_dquots(sbi->sb, type);
	if (ret)
		goto out;

	ret = filemap_fdatawrite(mapping);
	if (ret)
		goto out;

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		goto out;

	ret = filemap_fdatawait(mapping);

	/* drop the page cache so userspace sees the synced contents */
	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
out:
	if (ret)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}
3320 
/*
 * Sync quota files for @type, or for all types when @type == -1.
 * Each active type is synced under f2fs_lock_op()/quota_sem (see the
 * lock-order trace below); for non-sysfile quota the quota inode is
 * additionally locked.  Stops at the first failure.
 */
int f2fs_do_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret = 0;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {

		if (type != -1 && cnt != type)
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_lock(dqopt->files[cnt]);

		/*
		 * f2fs_lock_op() is taken before quota_sem; the trace below
		 * shows the competing paths that motivate this ordering:
		 *
		 * do_quotactl
		 *  f2fs_quota_sync
		 *  f2fs_down_read(quota_sem)
		 *  dquot_writeback_dquots()
		 *  f2fs_dquot_commit
		 *			      block_operation
		 *			      f2fs_down_read(quota_sem)
		 */
		f2fs_lock_op(sbi);
		f2fs_down_read(&sbi->quota_sem);

		ret = f2fs_quota_sync_file(sbi, cnt);

		f2fs_up_read(&sbi->quota_sem);
		f2fs_unlock_op(sbi);

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_unlock(dqopt->files[cnt]);

		if (ret)
			break;
	}
	return ret;
}
3368 
3369 static int f2fs_quota_sync(struct super_block *sb, int type)
3370 {
3371 	int ret;
3372 
3373 	F2FS_SB(sb)->umount_lock_holder = current;
3374 	ret = f2fs_do_quota_sync(sb, type);
3375 	F2FS_SB(sb)->umount_lock_holder = NULL;
3376 	return ret;
3377 }
3378 
/*
 * ->quota_on quotactl operation: enable quota on a user-specified file.
 * Denied when the filesystem already uses quota sysfiles (quota_ino).
 * The quota file is synced, written back, and then tagged with the
 * default quota inode flags.
 */
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err = 0;

	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

	/* the quota file must live on this filesystem */
	if (path->dentry->d_sb != sb)
		return -EXDEV;

	F2FS_SB(sb)->umount_lock_holder = current;

	err = f2fs_do_quota_sync(sb, type);
	if (err)
		goto out;

	inode = d_inode(path->dentry);

	/* flush the quota file's dirty pages before handing it to dquot */
	err = filemap_fdatawrite(inode->i_mapping);
	if (err)
		goto out;

	err = filemap_fdatawait(inode->i_mapping);
	if (err)
		goto out;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		goto out;

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out:
	F2FS_SB(sb)->umount_lock_holder = NULL;
	return err;
}
3423 
/*
 * Turn off one quota type, syncing it first.  For on-disk quota files
 * (not quota sysfiles) the default quota inode flags are cleared again.
 */
static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	/* no active quota file, or it is going away: plain dquot off */
	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_do_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
3449 
3450 static int f2fs_quota_off(struct super_block *sb, int type)
3451 {
3452 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3453 	int err;
3454 
3455 	F2FS_SB(sb)->umount_lock_holder = current;
3456 
3457 	err = __f2fs_quota_off(sb, type);
3458 
3459 	/*
3460 	 * quotactl can shutdown journalled quota, result in inconsistence
3461 	 * between quota record and fs data by following updates, tag the
3462 	 * flag to let fsck be aware of it.
3463 	 */
3464 	if (is_journalled_quota(sbi))
3465 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3466 
3467 	F2FS_SB(sb)->umount_lock_holder = NULL;
3468 
3469 	return err;
3470 }
3471 
/*
 * Turn off all quota types at umount time.  Failures are logged and
 * flagged for repair but never abort the teardown.
 */
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = __f2fs_quota_off(sb, type);
		if (err) {
			/* best effort: force plain dquot off and flag repair */
			int ret = dquot_quota_off(sb, type);

			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
				 type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
		}
	}
	/*
	 * In case of checkpoint=disable, we must flush quota blocks.
	 * This can cause NULL exception for node_inode in end_io, since
	 * put_super already dropped it.
	 */
	sync_filesystem(sb);
}
3494 
3495 static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
3496 {
3497 	struct quota_info *dqopt = sb_dqopt(sb);
3498 	int type;
3499 
3500 	for (type = 0; type < MAXQUOTAS; type++) {
3501 		if (!dqopt->files[type])
3502 			continue;
3503 		f2fs_inode_synced(dqopt->files[type]);
3504 	}
3505 }
3506 
3507 static int f2fs_dquot_commit(struct dquot *dquot)
3508 {
3509 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3510 	int ret;
3511 
3512 	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
3513 	ret = dquot_commit(dquot);
3514 	if (ret < 0)
3515 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3516 	f2fs_up_read(&sbi->quota_sem);
3517 	return ret;
3518 }
3519 
3520 static int f2fs_dquot_acquire(struct dquot *dquot)
3521 {
3522 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3523 	int ret;
3524 
3525 	f2fs_down_read(&sbi->quota_sem);
3526 	ret = dquot_acquire(dquot);
3527 	if (ret < 0)
3528 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3529 	f2fs_up_read(&sbi->quota_sem);
3530 	return ret;
3531 }
3532 
3533 static int f2fs_dquot_release(struct dquot *dquot)
3534 {
3535 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3536 	int ret = dquot_release(dquot);
3537 
3538 	if (ret < 0)
3539 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3540 	return ret;
3541 }
3542 
3543 static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
3544 {
3545 	struct super_block *sb = dquot->dq_sb;
3546 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3547 	int ret = dquot_mark_dquot_dirty(dquot);
3548 
3549 	/* if we are using journalled quota */
3550 	if (is_journalled_quota(sbi))
3551 		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
3552 
3553 	return ret;
3554 }
3555 
3556 static int f2fs_dquot_commit_info(struct super_block *sb, int type)
3557 {
3558 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3559 	int ret = dquot_commit_info(sb, type);
3560 
3561 	if (ret < 0)
3562 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3563 	return ret;
3564 }
3565 
3566 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
3567 {
3568 	*projid = F2FS_I(inode)->i_projid;
3569 	return 0;
3570 }
3571 
/* dquot callbacks wired into the VFS quota core */
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= f2fs_dquot_commit,
	.acquire_dquot	= f2fs_dquot_acquire,
	.release_dquot	= f2fs_dquot_release,
	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
	.write_info	= f2fs_dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};
3584 
/* quotactl(2) entry points; mostly generic dquot implementations */
static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
3595 #else
/* !CONFIG_QUOTA stub: nothing to initialize */
int f2fs_dquot_initialize(struct inode *inode)
{
	return 0;
}
3600 
/* !CONFIG_QUOTA stub: nothing to sync */
int f2fs_do_quota_sync(struct super_block *sb, int type)
{
	return 0;
}
3605 
/* !CONFIG_QUOTA stub: nothing to turn off */
void f2fs_quota_off_umount(struct super_block *sb)
{
}
3609 #endif
3610 
/* f2fs superblock operations */
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.free_inode	= f2fs_free_inode,
	.drop_inode	= f2fs_drop_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.shutdown	= f2fs_shutdown,
};
3631 
3632 #ifdef CONFIG_FS_ENCRYPTION
/* Read the fscrypt encryption context from the inode's encryption xattr. */
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}
3639 
/*
 * Store the fscrypt encryption context into the inode's encryption
 * xattr.  XATTR_CREATE makes this fail if a context already exists.
 */
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}
3659 
/* Return the test_dummy_encryption policy from mount options, if any. */
static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
{
	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
}
3664 
/* Tell fscrypt that f2fs inode numbers are stable (never reused while in use). */
static bool f2fs_has_stable_inodes(struct super_block *sb)
{
	return true;
}
3669 
3670 static struct block_device **f2fs_get_devices(struct super_block *sb,
3671 					      unsigned int *num_devs)
3672 {
3673 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3674 	struct block_device **devs;
3675 	int i;
3676 
3677 	if (!f2fs_is_multi_device(sbi))
3678 		return NULL;
3679 
3680 	devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL);
3681 	if (!devs)
3682 		return ERR_PTR(-ENOMEM);
3683 
3684 	for (i = 0; i < sbi->s_ndevs; i++)
3685 		devs[i] = FDEV(i).bdev;
3686 	*num_devs = sbi->s_ndevs;
3687 	return devs;
3688 }
3689 
/* fscrypt integration callbacks */
static const struct fscrypt_operations f2fs_cryptops = {
	.inode_info_offs	= (int)offsetof(struct f2fs_inode_info, i_crypt_info) -
				  (int)offsetof(struct f2fs_inode_info, vfs_inode),
	.needs_bounce_pages	= 1,
	.has_32bit_inodes	= 1,
	.supports_subblock_data_units = 1,
	.legacy_key_prefix	= "f2fs:",
	.get_context		= f2fs_get_context,
	.set_context		= f2fs_set_context,
	.get_dummy_policy	= f2fs_get_dummy_policy,
	.empty_dir		= f2fs_empty_dir,
	.has_stable_inodes	= f2fs_has_stable_inodes,
	.get_devices		= f2fs_get_devices,
};
3704 #endif /* CONFIG_FS_ENCRYPTION */
3705 
/*
 * Look up an inode for NFS file-handle decoding, validating the nid
 * range and (when nonzero) the handle's generation number.
 */
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
3730 
/* ->fh_to_dentry export operation, backed by f2fs_nfs_get_inode(). */
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
3737 
/* ->fh_to_parent export operation, backed by f2fs_nfs_get_inode(). */
static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
3744 
/* NFS export operations (32-bit inode number file handles) */
static const struct export_operations f2fs_export_ops = {
	.encode_fh = generic_encode_ino32_fh,
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
3751 
3752 loff_t max_file_blocks(struct inode *inode)
3753 {
3754 	loff_t result = 0;
3755 	loff_t leaf_count;
3756 
3757 	/*
3758 	 * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
3759 	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
3760 	 * space in inode.i_addr, it will be more safe to reassign
3761 	 * result as zero.
3762 	 */
3763 
3764 	if (inode && f2fs_compressed_file(inode))
3765 		leaf_count = ADDRS_PER_BLOCK(inode);
3766 	else
3767 		leaf_count = DEF_ADDRS_PER_BLOCK;
3768 
3769 	/* two direct node blocks */
3770 	result += (leaf_count * 2);
3771 
3772 	/* two indirect node blocks */
3773 	leaf_count *= NIDS_PER_BLOCK;
3774 	result += (leaf_count * 2);
3775 
3776 	/* one double indirect node block */
3777 	leaf_count *= NIDS_PER_BLOCK;
3778 	result += leaf_count;
3779 
3780 	/*
3781 	 * For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
3782 	 * a 4K crypto data unit, we must restrict the max filesize to what can
3783 	 * fit within U32_MAX + 1 data units.
3784 	 */
3785 
3786 	result = umin(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
3787 
3788 	return result;
3789 }
3790 
/*
 * Synchronously write one superblock copy, held in @folio at @index, to
 * disk with preflush+FUA, optionally refreshing it from the in-memory
 * super first (@update).  Returns 0 or the error from submit_bio_wait().
 */
static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
						pgoff_t index, bool update)
{
	struct bio *bio;
	/* it's rare case, we can do fua all the time */
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
	int ret;

	folio_lock(folio);
	folio_wait_writeback(folio);
	if (update)
		memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi),
					sizeof(struct f2fs_super_block));
	/* mark then immediately claim the folio for our private writeback */
	folio_mark_dirty(folio);
	folio_clear_dirty_for_io(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);

	bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);

	/* it doesn't need to set crypto context for superblock update */
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio->index);

	if (!bio_add_folio(bio, folio, folio_size(folio), 0))
		f2fs_bug_on(sbi, 1);

	ret = submit_bio_wait(bio);
	bio_put(bio);
	folio_end_writeback(folio);

	return ret;
}
3823 
/*
 * Cross-check the metadata layout recorded in the raw superblock: the
 * CP, SIT, NAT, SSA and MAIN areas must be contiguous in that order,
 * and MAIN must not extend past the end of the segment area.  When MAIN
 * ends early, segment_count is fixed up in memory and, if the device is
 * writable, the fixed superblock is written back.  Returns true when
 * the superblock is invalid.
 */
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct folio *folio, pgoff_t index)
{
	struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				((u64)segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				((u64)segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			  segment0_blkaddr, cp_blkaddr);
		return true;
	}

	/* each area must end exactly where the next one starts */
	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			  cp_blkaddr, sit_blkaddr,
			  segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			  sit_blkaddr, nat_blkaddr,
			  segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			  nat_blkaddr, ssa_blkaddr,
			  segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			  ssa_blkaddr, main_blkaddr,
			  segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
			  main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) {
			/* can't persist the fix; remember to write it later */
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(sbi, folio, index, false);
			res = err ? "failed" : "done";
		}
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
			  res, main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
3913 
/*
 * Validate the raw on-disk superblock stored in @folio at block @index.
 *
 * Checks the magic number, the optional superblock checksum, and the
 * consistency of the geometry fields (block/segment/section/zone sizes
 * and counts, device segment totals, extension counts, cp_payload and
 * reserved inode numbers), then delegates the CP/SIT/NAT/SSA/MAIN
 * boundary check to sanity_check_area_boundary().
 *
 * Return: 0 if the superblock looks sane, -EINVAL on a magic mismatch,
 * -EFSCORRUPTED for any inconsistent field, or -EOPNOTSUPP for a legacy
 * unpacked-SSA layout on >4KB block sizes.
 */
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
					struct folio *folio, pgoff_t index)
{
	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
	size_t crc_offset = 0;
	__u32 crc = 0;

	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return -EINVAL;
	}

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
				  crc_offset);
			return -EFSCORRUPTED;
		}
		crc = le32_to_cpu(raw_super->crc);
		/* CRC covers everything up to (not including) the crc field */
		if (crc != f2fs_crc32(raw_super, crc_offset)) {
			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
			return -EFSCORRUPTED;
		}
	}

	/* only support block_size equals to PAGE_SIZE */
	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
			  le32_to_cpu(raw_super->log_blocksize),
			  F2FS_BLKSIZE_BITS);
		return -EFSCORRUPTED;
	}

	/* check log blocks per segment; only 512 blocks/seg is supported */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
			  le32_to_cpu(raw_super->log_blocks_per_seg));
		return -EFSCORRUPTED;
	}

	/* Currently, support 512/1024/2048/4096/16K bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectorsize (%u)",
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}
	/* sectors-per-block and sector size must multiply to the block size */
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
			  le32_to_cpu(raw_super->log_sectors_per_block),
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg));

	if (segment_count > F2FS_MAX_SEGMENT ||
				segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
		return -EFSCORRUPTED;
	}

	if (total_sections > segment_count_main || total_sections < 1 ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			  segment_count, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	/* the main area must be exactly sections * segments-per-section */
	if (segment_count_main != total_sections * segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
			  segment_count_main, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
			  segment_count, segs_per_sec, total_sections);
		return -EFSCORRUPTED;
	}

	/* >> 9: log_blocks_per_seg was verified to be exactly 9 above */
	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
			  segment_count, le64_to_cpu(raw_super->block_count));
		return -EFSCORRUPTED;
	}

	if (RDEV(0).path[0]) {
		/* multi-device image: device totals must add up */
		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
		int i = 1;

		while (i < MAX_DEVICES && RDEV(i).path[0]) {
			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
			i++;
		}
		if (segment_count != dev_seg_count) {
			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
					segment_count, dev_seg_count);
			return -EFSCORRUPTED;
		}
	} else {
		/* zoned feature without a device path needs a zoned bdev */
		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
					!bdev_is_zoned(sbi->sb->s_bdev)) {
			f2fs_info(sbi, "Zoned block device path is missing");
			return -EFSCORRUPTED;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
			  secs_per_zone, total_sections);
		return -EFSCORRUPTED;
	}
	/* cold + hot extension lists share a fixed-size on-disk table */
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
			  le32_to_cpu(raw_super->extension_count),
			  raw_super->hot_ext_count,
			  F2FS_MAX_EXTENSION);
		return -EFSCORRUPTED;
	}

	/* cp_payload must leave room for the CP packs and curseg summaries */
	if (le32_to_cpu(raw_super->cp_payload) >=
				(blocks_per_seg - F2FS_CP_PACKS -
				NR_CURSEG_PERSIST_TYPE)) {
		f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
			  le32_to_cpu(raw_super->cp_payload),
			  blocks_per_seg - F2FS_CP_PACKS -
			  NR_CURSEG_PERSIST_TYPE);
		return -EFSCORRUPTED;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			  le32_to_cpu(raw_super->node_ino),
			  le32_to_cpu(raw_super->meta_ino),
			  le32_to_cpu(raw_super->root_ino));
		return -EFSCORRUPTED;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, folio, index))
		return -EFSCORRUPTED;

	/*
	 * Check for legacy summary layout on 16KB+ block devices.
	 * Modern f2fs-tools packs multiple 4KB summary areas into one block,
	 * whereas legacy versions used one block per summary, leading
	 * to a much larger SSA.
	 */
	if (SUMS_PER_BLOCK > 1 &&
		    !(__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_PACKED_SSA))) {
		f2fs_info(sbi, "Error: Device formatted with a legacy version. "
			"Please reformat with a tool supporting the packed ssa "
			"feature for block sizes larger than 4kb.");
		return -EOPNOTSUPP;
	}

	return 0;
}
4096 
/*
 * Cross-check the loaded checkpoint against the superblock geometry.
 *
 * Verifies metadata segment accounting, user/node block counts, current
 * segment numbers and offsets (including duplicate-segno detection),
 * SIT/NAT bitmap sizes, cp_pack layout, and the nat_bits footprint.
 *
 * Return: 0 if the checkpoint is consistent, 1 if any check fails
 * (caller treats non-zero as "needs fsck").
 */
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count, valid_user_blocks;
	block_t avail_node_count, valid_node_count;
	unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
	unsigned int sit_blk_cnt;
	int i, j;

	/* sum up all metadata segments: CP + SIT + NAT + reserved + SSA */
	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	/* metadata must not consume the whole volume */
	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	/* rw images need minimum metadata plus overprovision/reserve */
	if (!f2fs_sb_has_readonly(sbi) &&
			unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
		return 1;
	}
	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
			(f2fs_sb_has_readonly(sbi) ? 1 : 0);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_err(sbi, "Wrong user_block_count: %u",
			 user_block_count);
		return 1;
	}

	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
	if (valid_user_blocks > user_block_count) {
		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
			 valid_user_blocks, user_block_count);
		return 1;
	}

	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	if (valid_node_count > avail_node_count) {
		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
			 valid_node_count, avail_node_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = BLKS_PER_SEG(sbi);

	/*
	 * Each current node segment must lie inside the main area and its
	 * write offset inside the segment; no two may share a segno.
	 */
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;

		/* readonly images skip the duplicate-segno checks */
		if (f2fs_sb_has_readonly(sbi))
			goto check_data;

		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
check_data:
	/* same checks for the current data segments */
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;

		if (f2fs_sb_has_readonly(sbi))
			goto skip_cross;

		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	/* node and data current segments must not overlap either */
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
skip_cross:
	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	/* bitmap bytes must match half the SIT/NAT area (two copies) */
	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
			 sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	/* the SIT bitmap needs one bit per SIT block */
	sit_blk_cnt = DIV_ROUND_UP(main_segs, SIT_ENTRY_PER_BLOCK);
	if (sit_bitmap_size * 8 < sit_blk_cnt) {
		f2fs_err(sbi, "Wrong bitmap size: sit: %u, sit_blk_cnt:%u",
			 sit_bitmap_size, sit_blk_cnt);
		return 1;
	}

	/* summary blocks must follow the payload and fit in the segment */
	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_PERSIST_TYPE) {
		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
			 cp_pack_start_sum);
		return 1;
	}

	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
			  le32_to_cpu(ckpt->checksum_offset));
		return 1;
	}

	/* nat_bits (two copies + 8-byte header) must fit in the cp pack */
	nat_blocks = nat_segs << log_blocks_per_seg;
	nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
	nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
		(cp_payload + F2FS_CP_PACKS +
		NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
		f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
			  cp_payload, nat_bits_blocks);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_err(sbi, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
4267 
/*
 * Initialize in-memory sb_info fields: cache geometry values decoded from
 * the raw superblock, set tunables to their defaults, and initialize the
 * locks, lists and counters embedded in @sbi.
 */
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	/* geometry derived from the on-disk superblock */
	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = BIT(sbi->log_blocksize);
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg);
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	/* NAT area is mirrored, hence the /2 when sizing node count */
	sbi->total_node_count = SEGS_TO_BLKS(sbi,
			((le32_to_cpu(raw_super->segment_count_nat) / 2) *
			NAT_ENTRY_PER_BLOCK));
	sbi->allocate_section_hint = le32_to_cpu(raw_super->section_count);
	sbi->allocate_section_policy = ALLOCATE_FORWARD_NOHINT;
	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);

	/* GC state and default tunables */
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->gc_mode = GC_NORMAL;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = SEGS_PER_SEC(sbi);
	sbi->migration_window_granularity = f2fs_sb_has_blkzoned(sbi) ?
		DEF_MIGRATION_WINDOW_GRANULARITY_ZONED : SEGS_PER_SEC(sbi);
	sbi->seq_file_ra_mul = MIN_RA_MUL;
	sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
	sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
	spin_lock_init(&sbi->gc_remaining_trials_lock);
	atomic64_set(&sbi->current_atomic_write, 0);

	/* default idle/checkpoint timing intervals */
	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[ENABLE_TIME] = DEF_ENABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_f2fs_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_f2fs_rwsem(&sbi->sb_lock);
	init_f2fs_rwsem(&sbi->pin_sem);
}
4332 
4333 static int init_percpu_info(struct f2fs_sb_info *sbi)
4334 {
4335 	int err;
4336 
4337 	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
4338 	if (err)
4339 		return err;
4340 
4341 	err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
4342 	if (err)
4343 		goto err_valid_block;
4344 
4345 	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
4346 								GFP_KERNEL);
4347 	if (err)
4348 		goto err_node_block;
4349 	return 0;
4350 
4351 err_node_block:
4352 	percpu_counter_destroy(&sbi->rf_node_block_count);
4353 err_valid_block:
4354 	percpu_counter_destroy(&sbi->alloc_valid_block_count);
4355 	return err;
4356 }
4357 
4358 #ifdef CONFIG_BLK_DEV_ZONED
4359 
/* Context passed to f2fs_report_zone_cb() via blkdev_report_zones(). */
struct f2fs_report_zones_args {
	struct f2fs_sb_info *sbi;	/* filesystem being initialized */
	struct f2fs_dev_info *dev;	/* device whose zones are reported */
};
4364 
4365 static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
4366 			      void *data)
4367 {
4368 	struct f2fs_report_zones_args *rz_args = data;
4369 	block_t unusable_blocks = (zone->len - zone->capacity) >>
4370 					F2FS_LOG_SECTORS_PER_BLOCK;
4371 
4372 	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
4373 		return 0;
4374 
4375 	set_bit(idx, rz_args->dev->blkz_seq);
4376 	if (!rz_args->sbi->unusable_blocks_per_sec) {
4377 		rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
4378 		return 0;
4379 	}
4380 	if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
4381 		f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n");
4382 		return -EINVAL;
4383 	}
4384 	return 0;
4385 }
4386 
/*
 * Initialize zone information for device @devi: validate the open-zone
 * budget against active_logs, compute blocks-per-zone and the zone count,
 * allocate the sequential-zone bitmap, and populate it via a zone report.
 *
 * Returns 0 on success, -EINVAL on inconsistent zone geometry or an
 * insufficient open-zone limit, -ENOMEM on allocation failure, or the
 * error from blkdev_report_zones().
 */
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev_nr_sectors(bdev);
	struct f2fs_report_zones_args rep_zone_arg;
	u64 zone_sectors;
	unsigned int max_open_zones;
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	if (bdev_is_zoned(FDEV(devi).bdev)) {
		/* the tightest device limit bounds the fs-wide budget */
		max_open_zones = bdev_max_open_zones(bdev);
		if (max_open_zones && (max_open_zones < sbi->max_open_zones))
			sbi->max_open_zones = max_open_zones;
		if (sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
			f2fs_err(sbi,
				"zoned: max open zones %u is too small, need at least %u open zones",
				sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
			return -EINVAL;
		}
	}

	/* all zoned devices in the fs must share one zone size */
	zone_sectors = bdev_zone_sectors(bdev);
	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(zone_sectors))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
	FDEV(devi).nr_blkz = div_u64(SECTOR_TO_BLOCK(nr_sectors),
					sbi->blocks_per_blkz);
	/* account for a trailing partial zone */
	if (nr_sectors & (zone_sectors - 1))
		FDEV(devi).nr_blkz++;

	/* one bit per zone: set when the zone requires sequential writes */
	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

	rep_zone_arg.sbi = sbi;
	rep_zone_arg.dev = &FDEV(devi);

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
				  &rep_zone_arg);
	if (ret < 0)
		return ret;
	return 0;
}
4437 #endif
4438 
4439 /*
4440  * Read f2fs raw super block.
4441  * Because we have two copies of super block, so read both of them
4442  * to get the first valid one. If any one of them is broken, we pass
4443  * them recovery flag back to the caller.
4444  */
4445 static int read_raw_super_block(struct f2fs_sb_info *sbi,
4446 			struct f2fs_super_block **raw_super,
4447 			int *valid_super_block, int *recovery)
4448 {
4449 	struct super_block *sb = sbi->sb;
4450 	int block;
4451 	struct folio *folio;
4452 	struct f2fs_super_block *super;
4453 	int err = 0;
4454 
4455 	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
4456 	if (!super)
4457 		return -ENOMEM;
4458 
4459 	for (block = 0; block < 2; block++) {
4460 		folio = read_mapping_folio(sb->s_bdev->bd_mapping, block, NULL);
4461 		if (IS_ERR(folio)) {
4462 			f2fs_err(sbi, "Unable to read %dth superblock",
4463 				 block + 1);
4464 			err = PTR_ERR(folio);
4465 			*recovery = 1;
4466 			continue;
4467 		}
4468 
4469 		/* sanity checking of raw super */
4470 		err = sanity_check_raw_super(sbi, folio, block);
4471 		if (err) {
4472 			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
4473 				 block + 1);
4474 			folio_put(folio);
4475 			*recovery = 1;
4476 			continue;
4477 		}
4478 
4479 		if (!*raw_super) {
4480 			memcpy(super, F2FS_SUPER_BLOCK(folio, block),
4481 							sizeof(*super));
4482 			*valid_super_block = block;
4483 			*raw_super = super;
4484 		}
4485 		folio_put(folio);
4486 	}
4487 
4488 	/* No valid superblock */
4489 	if (!*raw_super)
4490 		kfree(super);
4491 	else
4492 		err = 0;
4493 
4494 	return err;
4495 }
4496 
4497 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
4498 {
4499 	struct folio *folio;
4500 	pgoff_t index;
4501 	__u32 crc = 0;
4502 	int err;
4503 
4504 	if ((recover && f2fs_readonly(sbi->sb)) ||
4505 				f2fs_hw_is_readonly(sbi)) {
4506 		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
4507 		return -EROFS;
4508 	}
4509 
4510 	/* we should update superblock crc here */
4511 	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
4512 		crc = f2fs_crc32(F2FS_RAW_SUPER(sbi),
4513 				offsetof(struct f2fs_super_block, crc));
4514 		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
4515 	}
4516 
4517 	/* write back-up superblock first */
4518 	index = sbi->valid_super_block ? 0 : 1;
4519 	folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
4520 	if (IS_ERR(folio))
4521 		return PTR_ERR(folio);
4522 	err = __f2fs_commit_super(sbi, folio, index, true);
4523 	folio_put(folio);
4524 
4525 	/* if we are in recovery path, skip writing valid superblock */
4526 	if (recover || err)
4527 		return err;
4528 
4529 	/* write current valid superblock */
4530 	index = sbi->valid_super_block;
4531 	folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
4532 	if (IS_ERR(folio))
4533 		return PTR_ERR(folio);
4534 	err = __f2fs_commit_super(sbi, folio, index, true);
4535 	folio_put(folio);
4536 	return err;
4537 }
4538 
4539 static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason)
4540 {
4541 	unsigned long flags;
4542 
4543 	spin_lock_irqsave(&sbi->error_lock, flags);
4544 	if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
4545 		sbi->stop_reason[reason]++;
4546 	spin_unlock_irqrestore(&sbi->error_lock, flags);
4547 }
4548 
/*
 * Copy the accumulated error bits and stop-reason counters into the raw
 * superblock and commit it to disk. Runs from the s_error_work worker;
 * sb_lock serializes superblock updates while error_lock protects the
 * in-memory error state.
 */
static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned long flags;
	int err;

	f2fs_down_write(&sbi->sb_lock);

	spin_lock_irqsave(&sbi->error_lock, flags);
	/* flush pending error bits into the raw superblock, if any */
	if (sbi->error_dirty) {
		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
							MAX_F2FS_ERRORS);
		sbi->error_dirty = false;
	}
	memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON);
	spin_unlock_irqrestore(&sbi->error_lock, flags);

	err = f2fs_commit_super(sbi, false);

	f2fs_up_write(&sbi->sb_lock);
	if (err)
		f2fs_err_ratelimited(sbi,
			"f2fs_commit_super fails to record stop_reason, err:%d",
			err);
}
4574 
4575 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
4576 {
4577 	unsigned long flags;
4578 
4579 	spin_lock_irqsave(&sbi->error_lock, flags);
4580 	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
4581 		set_bit(flag, (unsigned long *)sbi->errors);
4582 		sbi->error_dirty = true;
4583 	}
4584 	spin_unlock_irqrestore(&sbi->error_lock, flags);
4585 }
4586 
4587 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
4588 {
4589 	f2fs_save_errors(sbi, error);
4590 
4591 	if (!sbi->error_dirty)
4592 		return;
4593 	if (!test_bit(error, (unsigned long *)sbi->errors))
4594 		return;
4595 	schedule_work(&sbi->s_error_work);
4596 }
4597 
4598 static bool system_going_down(void)
4599 {
4600 	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
4601 		|| system_state == SYSTEM_RESTART;
4602 }
4603 
/*
 * React to a critical filesystem error identified by @reason: flag the
 * checkpoint as errored, asynchronously persist the stop reason, and then
 * apply the mount-time error policy (panic, continue, or effectively
 * read-only via the CP_ERROR_FLAG).
 */
void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason)
{
	struct super_block *sb = sbi->sb;
	bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
	bool continue_fs = !shutdown &&
			F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE;

	/* stop all further updates to the filesystem */
	set_ckpt_flags(sbi, CP_ERROR_FLAG);

	if (!f2fs_hw_is_readonly(sbi)) {
		save_stop_reason(sbi, reason);

		/*
		 * always create an asynchronous task to record stop_reason
		 * in order to avoid potential deadlock when running into
		 * f2fs_record_stop_reason() synchronously.
		 */
		schedule_work(&sbi->s_error_work);
	}

	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC &&
				!shutdown && !system_going_down() &&
				!is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))
		panic("F2FS-fs (device %s): panic forced after error\n",
							sb->s_id);

	if (shutdown)
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
	else
		dump_stack();	/* leave a trace of who hit the error */

	/*
	 * Continue filesystem operators if errors=continue. Should not set
	 * RO by shutdown, since RO bypasses thaw_super which can hang the
	 * system.
	 */
	if (continue_fs || f2fs_readonly(sb) || shutdown) {
		f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason);
		return;
	}

	f2fs_warn(sbi, "Remounting filesystem read-only");

	/*
	 * We have already set CP_ERROR_FLAG flag to stop all updates
	 * to filesystem, so it doesn't need to set SB_RDONLY flag here
	 * because the flag should be set covered w/ sb->s_umount semaphore
	 * via remount procedure, otherwise, it will confuse code like
	 * freeze_super() which will lead to deadlocks and other problems.
	 */
}
4660 
4661 static void f2fs_record_error_work(struct work_struct *work)
4662 {
4663 	struct f2fs_sb_info *sbi = container_of(work,
4664 					struct f2fs_sb_info, s_error_work);
4665 
4666 	f2fs_record_stop_reason(sbi);
4667 }
4668 
/*
 * Find the segment number of the first sequential-write-required zone
 * across all zoned devices of this filesystem.
 *
 * Returns the segno of the first sequential zone, or NULL_SEGNO when the
 * filesystem is not zoned (or CONFIG_BLK_DEV_ZONED is disabled / no
 * sequential zone exists).
 */
static inline unsigned int get_first_seq_zone_segno(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int zoneno, total_zones;
	int devi;

	if (!f2fs_sb_has_blkzoned(sbi))
		return NULL_SEGNO;

	for (devi = 0; devi < sbi->s_ndevs; devi++) {
		if (!bdev_is_zoned(FDEV(devi).bdev))
			continue;

		total_zones = GET_ZONE_FROM_SEG(sbi, FDEV(devi).total_segments);

		for (zoneno = 0; zoneno < total_zones; zoneno++) {
			unsigned int segs, blks;

			if (!f2fs_zone_is_seq(sbi, devi, zoneno))
				continue;

			/* translate zone number -> segment -> block offset */
			segs = GET_SEG_FROM_SEC(sbi,
					zoneno * sbi->secs_per_zone);
			blks = SEGS_TO_BLKS(sbi, segs);
			/* segno is relative to the device's start block */
			return GET_SEGNO(sbi, FDEV(devi).start_blk + blks);
		}
	}
#endif
	return NULL_SEGNO;
}
4699 
/*
 * Discover and open the devices backing this filesystem.
 *
 * For a plain single-device image (no device paths in the superblock and
 * a non-zoned bdev) nothing is allocated. Otherwise sbi->devs is
 * populated: either one entry describing the single zoned device, or one
 * entry per device path recorded in the superblock, opening each extra
 * device and computing its block range. Zone information is initialized
 * for every zoned device encountered.
 *
 * Returns 0 on success or a negative errno (allocation, open, or zone
 * setup failure). Partially-opened devices are tracked via s_ndevs so
 * the caller can release them on error.
 */
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	unsigned int logical_blksize;
	blk_mode_t mode = sb_open_mode(sbi->sb->s_flags);
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
	sbi->aligned_blksize = true;
	sbi->bggc_io_aware = AWARE_ALL_IO;
#ifdef CONFIG_BLK_DEV_ZONED
	sbi->max_open_zones = UINT_MAX;
	sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
	sbi->bggc_io_aware = AWARE_READ_IO;
#endif

	for (i = 0; i < max_devices; i++) {
		/* single zoned device: whole main area on one device */
		if (max_devices == 1) {
			FDEV(i).total_segments =
				le32_to_cpu(raw_super->segment_count_main);
			FDEV(i).start_blk = 0;
			FDEV(i).end_blk = FDEV(i).total_segments *
						BLKS_PER_SEG(sbi);
		}

		/* device 0 is the bdev the superblock was read from */
		if (i == 0)
			FDEV(0).bdev_file = sbi->sb->s_bdev_file;
		else if (!RDEV(i).path[0])
			break;

		if (max_devices > 1) {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
					SEGS_TO_BLKS(sbi,
					FDEV(i).total_segments) - 1 +
					le32_to_cpu(raw_super->segment0_blkaddr);
				sbi->allocate_section_hint = FDEV(i).total_segments /
							SEGS_PER_SEC(sbi);
			} else {
				/* ranges of later devices follow contiguously */
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
						SEGS_TO_BLKS(sbi,
						FDEV(i).total_segments) - 1;
				FDEV(i).bdev_file = bdev_file_open_by_path(
					FDEV(i).path, mode, sbi->sb, NULL);
			}
		}
		if (IS_ERR(FDEV(i).bdev_file))
			return PTR_ERR(FDEV(i).bdev_file);

		FDEV(i).bdev = file_bdev(FDEV(i).bdev_file);
		/* to release errored devices */
		sbi->s_ndevs = i + 1;

		/* mixed logical block sizes disable aligned-blksize paths */
		if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
			sbi->aligned_blksize = false;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_is_zoned(FDEV(i).bdev)) {
			if (!f2fs_sb_has_blkzoned(sbi)) {
				f2fs_err(sbi, "Zoned block device feature not enabled");
				return -EINVAL;
			}
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: Host-managed)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk);
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	return 0;
}
4807 
/*
 * Load the unicode casefolding table requested by the superblock, if the
 * casefold feature is enabled and no encoding is attached to the VFS
 * superblock yet. Without CONFIG_UNICODE, mounting a casefold image is
 * rejected.
 *
 * Returns 0 on success, -EINVAL for an unknown/unsupported configuration,
 * or the error from utf8_load().
 */
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#if IS_ENABLED(CONFIG_UNICODE)
	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
		if (!encoding_info) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%u.%u.%u "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name,
				 unicode_major(encoding_info->version),
				 unicode_minor(encoding_info->version),
				 unicode_rev(encoding_info->version),
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
			 unicode_major(encoding_info->version),
			 unicode_minor(encoding_info->version),
			 unicode_rev(encoding_info->version),
			 encoding_flags);

		sbi->sb->s_encoding = encoding;
		sbi->sb->s_encoding_flags = encoding_flags;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}
4854 
4855 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
4856 {
4857 	/* adjust parameters according to the volume size */
4858 	if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) {
4859 		if (f2fs_block_unit_discard(sbi))
4860 			SM_I(sbi)->dcc_info->discard_granularity =
4861 						MIN_DISCARD_GRANULARITY;
4862 		if (!f2fs_lfs_mode(sbi))
4863 			SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
4864 						BIT(F2FS_IPU_HONOR_OPU_WRITE);
4865 	}
4866 
4867 	sbi->readdir_ra = true;
4868 }
4869 
/*
 * f2fs_fill_super - read the on-disk superblock and bring up a new mount
 * @sb: VFS superblock to populate
 * @fc: filesystem context carrying the parsed mount options
 *
 * Returns 0 on success or a negative errno.  On failure, everything
 * acquired so far is released through the goto ladder at the bottom;
 * the labels unwind in reverse order of acquisition, so a new init step
 * must add its cleanup at the matching position in the ladder.
 */
static int f2fs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;	/* one full retry allowed after a failed recovery */
#ifdef CONFIG_QUOTA
	bool quota_enabled = false;
#endif

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* initialize locks within allocated memory */
	init_f2fs_rwsem(&sbi->gc_lock);
	mutex_init(&sbi->writepages);
	init_f2fs_rwsem(&sbi->cp_global_sem);
	init_f2fs_rwsem(&sbi->node_write);
	init_f2fs_rwsem(&sbi->node_change);
	spin_lock_init(&sbi->stat_lock);
	init_f2fs_rwsem(&sbi->cp_rwsem);
	init_f2fs_rwsem(&sbi->cp_enable_rwsem);
	init_f2fs_rwsem(&sbi->quota_sem);
	init_waitqueue_head(&sbi->cp_wait);
	spin_lock_init(&sbi->error_lock);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}
	mutex_init(&sbi->flush_lock);

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_err(sbi, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* mirror persisted error/stop-reason state into the in-memory sbi */
	INIT_WORK(&sbi->s_error_work, f2fs_record_error_work);
	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
	memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON);

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(~0, raw_super->uuid,
						 sizeof(raw_super->uuid));

	default_options(sbi, false);

	err = f2fs_check_opt_consistency(fc, sb);
	if (err)
		goto free_sb_buf;

	f2fs_apply_options(fc, sb);

	err = f2fs_sanity_check_options(sbi, false);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_blocks(NULL) <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

	err = f2fs_setup_casefold(sbi);
	if (err)
		goto free_options;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	/* count quota files stored as internal inodes */
	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	if (test_opt(sbi, INLINECRYPT))
		sb->s_flags |= SB_INLINECRYPT;

	if (test_opt(sbi, LAZYTIME))
		sb->s_flags |= SB_LAZYTIME;
	else
		sb->s_flags &= ~SB_LAZYTIME;

	super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid));
	super_set_sysfs_name_bdev(sb);
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);

	err = f2fs_init_write_merge_io(sbi);
	if (err)
		goto free_bio_info;

	init_sb_info(sbi);

	err = f2fs_init_iostat(sbi);
	if (err)
		goto free_bio_info;

	err = init_percpu_info(sbi);
	if (err)
		goto free_iostat;

	err = f2fs_init_page_array_cache(sbi);
	if (err)
		goto free_percpu;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_page_array_cache;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* propagate checkpoint state flags into sbi */
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to find devices");
		goto free_devices;
	}

	err = f2fs_init_post_read_wq(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize post read workqueue");
		goto free_devices;
	}

	/* pull block/node/inode accounting out of the valid checkpoint */
	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup checkpoint request control and start checkpoint issue thread */
	f2fs_init_ckpt_req_control(sbi);
	if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
			test_opt(sbi, MERGE_CHECKPOINT)) {
		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
			    "Failed to start F2FS issue_checkpoint_thread (%d)",
			    err);
			goto stop_ckpt_thread;
		}
	}

	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
		goto free_nm;
	}

	/* For write statistics */
	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);

	/* get segno of first zoned block device */
	sbi->first_seq_zone_segno = get_first_seq_zone_segno(sbi);

	sbi->reserved_pin_section = f2fs_sb_has_blkzoned(sbi) ?
			ZONED_PIN_SEC_REQUIRED_COUNT :
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi));

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_err(sbi, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_err(sbi, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	/* reject a corrupted root: must be a non-empty, linked directory */
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	generic_set_sb_d_ops(sb);
	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}

	err = f2fs_init_compress_inode(sbi);
	if (err)
		goto free_root_inode;

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_compress_inode;

	sbi->umount_lock_holder = current;
#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
	}

	quota_enabled = f2fs_recover_quota_begin(sbi);
#endif
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) {
		skip_recovery = true;
		goto reset_checkpoint;
	}

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
		/*
		 * mount should be failed, when device has readonly mode, and
		 * previous checkpoint was not done by clean system shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = f2fs_recover_fsync_data(sbi, true);
				if (err > 0) {
					err = -EROFS;
					f2fs_err(sbi, "Need to recover fsync data, but "
						"write access unavailable, please try "
						"mount w/ disable_roll_forward or norecovery");
				}
				if (err < 0)
					goto free_meta;
			}
			f2fs_info(sbi, "write access unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			/* ENOMEM is retried with recovery; others skip it */
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
			goto free_meta;
		}
	} else {
		/* probe only: err > 0 means un-recovered fsync data exists */
		err = f2fs_recover_fsync_data(sbi, true);
		if (err > 0) {
			if (!f2fs_readonly(sb)) {
				f2fs_err(sbi, "Need to recover fsync data");
				err = -EINVAL;
				goto free_meta;
			} else {
				f2fs_info(sbi, "drop all fsynced data");
				err = 0;
			}
		}
	}

reset_checkpoint:
#ifdef CONFIG_QUOTA
	f2fs_recover_quota_end(sbi, quota_enabled);
#endif
	/*
	 * If the f2fs is not readonly and fsync data recovery succeeds,
	 * write pointer consistency of cursegs and other zones are already
	 * checked and fixed during recovery. However, if recovery fails,
	 * write pointers are left untouched, and retry-mount should check
	 * them here.
	 */
	if (skip_recovery)
		err = f2fs_check_and_fix_write_pointer(sbi);
	if (err)
		goto free_meta;

	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	err = f2fs_init_inmem_curseg(sbi);
	if (err)
		goto sync_free_meta;

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		err = f2fs_disable_checkpoint(sbi);
	else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))
		err = f2fs_enable_checkpoint(sbi);
	if (err)
		goto sync_free_meta;

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
		test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);

	sbi->umount_lock_holder = NULL;
	return 0;

sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	/* a late failure after curseg init is not retried */
	retry_cnt = 0;

free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_compress_inode:
	f2fs_destroy_compress_inode(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	/* stop discard thread before destroying node manager */
	f2fs_stop_discard_thread(sbi);
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
stop_ckpt_thread:
	f2fs_stop_ckpt_thread(sbi);
	/* flush s_error_work before sbi destroy */
	flush_work(&sbi->s_error_work);
	f2fs_destroy_post_read_wq(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_page_array_cache:
	f2fs_destroy_page_array_cache(sbi);
free_percpu:
	destroy_percpu_info(sbi);
free_iostat:
	f2fs_destroy_iostat(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);

#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
	sb->s_encoding = NULL;
#endif
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	/* no need to free dummy_enc_policy, we just keep it in ctx when failed */
	swap(F2FS_CTX_INFO(ctx).dummy_enc_policy, F2FS_OPTION(sbi).dummy_enc_policy);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	kfree(sbi);
	sb->s_fs_info = NULL;

	/* give only one another chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
5385 
5386 static int f2fs_get_tree(struct fs_context *fc)
5387 {
5388 	return get_tree_bdev(fc, f2fs_fill_super);
5389 }
5390 
5391 static int f2fs_reconfigure(struct fs_context *fc)
5392 {
5393 	struct super_block *sb = fc->root->d_sb;
5394 
5395 	return __f2fs_remount(fc, sb);
5396 }
5397 
5398 static void f2fs_fc_free(struct fs_context *fc)
5399 {
5400 	struct f2fs_fs_context *ctx = fc->fs_private;
5401 
5402 	if (!ctx)
5403 		return;
5404 
5405 #ifdef CONFIG_QUOTA
5406 	f2fs_unnote_qf_name_all(fc);
5407 #endif
5408 	fscrypt_free_dummy_policy(&F2FS_CTX_INFO(ctx).dummy_enc_policy);
5409 	kfree(ctx);
5410 }
5411 
5412 static const struct fs_context_operations f2fs_context_ops = {
5413 	.parse_param	= f2fs_parse_param,
5414 	.get_tree	= f2fs_get_tree,
5415 	.reconfigure = f2fs_reconfigure,
5416 	.free	= f2fs_fc_free,
5417 };
5418 
/*
 * kill_f2fs_super - tear down an f2fs mount
 * @sb: superblock being destroyed
 *
 * Stops background threads, writes a final umount checkpoint when the
 * filesystem is dirty, then releases the generic superblock; the device
 * list and sbi are freed last.
 */
static void kill_f2fs_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	/* s_root is non-NULL only for a fully set-up mount */
	if (sb->s_root) {
		sbi->umount_lock_holder = current;

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		/*
		 * latter evict_inode() can bypass checking and invalidating
		 * compress inode cache.
		 */
		if (test_opt(sbi, COMPRESS_CACHE))
			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
#endif

		/* write a final CP_UMOUNT checkpoint if one is still needed */
		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
	/* Release block devices last, after fscrypt_destroy_keyring(). */
	if (sbi) {
		destroy_device_list(sbi);
		kfree(sbi);
		sb->s_fs_info = NULL;
	}
}
5459 
5460 static int f2fs_init_fs_context(struct fs_context *fc)
5461 {
5462 	struct f2fs_fs_context *ctx;
5463 
5464 	ctx = kzalloc(sizeof(struct f2fs_fs_context), GFP_KERNEL);
5465 	if (!ctx)
5466 		return -ENOMEM;
5467 
5468 	fc->fs_private = ctx;
5469 	fc->ops = &f2fs_context_ops;
5470 
5471 	return 0;
5472 }
5473 
/* Filesystem registration: requires a backing block device, idmap-capable. */
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.init_fs_context = f2fs_init_fs_context,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("f2fs");
5482 
5483 static int __init init_inodecache(void)
5484 {
5485 	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
5486 			sizeof(struct f2fs_inode_info), 0,
5487 			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
5488 	return f2fs_inode_cachep ? 0 : -ENOMEM;
5489 }
5490 
5491 static void destroy_inodecache(void)
5492 {
5493 	/*
5494 	 * Make sure all delayed rcu free inodes are flushed before we
5495 	 * destroy cache.
5496 	 */
5497 	rcu_barrier();
5498 	kmem_cache_destroy(f2fs_inode_cachep);
5499 }
5500 
/*
 * init_f2fs_fs - module entry point
 *
 * Creates every global cache and subsystem f2fs needs, then registers the
 * filesystem type last so no mount can race an half-initialized module.
 * The error labels unwind in exact reverse order of the init calls; a new
 * step must add its cleanup at the matching position.
 */
static int __init init_f2fs_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_recovery_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_recovery_cache;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = f2fs_init_shrinker();
	if (err)
		goto free_sysfs;
	/* f2fs_create_root_stats() cannot fail; no matching error label */
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_iostat_processing();
	if (err)
		goto free_post_read;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_iostat;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	err = f2fs_create_casefold_cache();
	if (err)
		goto free_compress_cache;
	err = f2fs_init_xattr_cache();
	if (err)
		goto free_casefold_cache;
	/* registration goes last: mounts become possible after this point */
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_xattr_cache;
	return 0;
free_xattr_cache:
	f2fs_destroy_xattr_cache();
free_casefold_cache:
	f2fs_destroy_casefold_cache();
free_compress_cache:
	f2fs_destroy_compress_cache();
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_iostat:
	f2fs_destroy_iostat_processing();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	f2fs_exit_shrinker();
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_recovery_cache:
	f2fs_destroy_recovery_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
5599 
/*
 * exit_f2fs_fs - module exit point
 *
 * Unregisters the filesystem first so no new mounts can start, then tears
 * everything down in exact reverse order of init_f2fs_fs().
 */
static void __exit exit_f2fs_fs(void)
{
	unregister_filesystem(&f2fs_fs_type);
	f2fs_destroy_xattr_cache();
	f2fs_destroy_casefold_cache();
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_iostat_processing();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	f2fs_exit_shrinker();
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_recovery_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
}
5622 
/* Module entry/exit hooks and metadata. */
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
5629