xref: /linux/fs/f2fs/super.c (revision 3e48a11675c50698374d4ac596fb506736eb1c53)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/super.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/module.h>
9 #include <linux/init.h>
10 #include <linux/fs.h>
11 #include <linux/fs_context.h>
12 #include <linux/sched/mm.h>
13 #include <linux/statfs.h>
14 #include <linux/kthread.h>
15 #include <linux/parser.h>
16 #include <linux/mount.h>
17 #include <linux/seq_file.h>
18 #include <linux/proc_fs.h>
19 #include <linux/random.h>
20 #include <linux/exportfs.h>
21 #include <linux/blkdev.h>
22 #include <linux/quotaops.h>
23 #include <linux/f2fs_fs.h>
24 #include <linux/sysfs.h>
25 #include <linux/quota.h>
26 #include <linux/unicode.h>
27 #include <linux/part_stat.h>
28 #include <linux/zstd.h>
29 #include <linux/lz4.h>
30 #include <linux/ctype.h>
31 #include <linux/fs_parser.h>
32 
33 #include "f2fs.h"
34 #include "node.h"
35 #include "segment.h"
36 #include "xattr.h"
37 #include "gc.h"
38 #include "iostat.h"
39 
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/f2fs.h>
42 
43 static struct kmem_cache *f2fs_inode_cachep;
44 
45 #ifdef CONFIG_F2FS_FAULT_INJECTION
46 
47 const char *f2fs_fault_name[FAULT_MAX] = {
48 	[FAULT_KMALLOC]			= "kmalloc",
49 	[FAULT_KVMALLOC]		= "kvmalloc",
50 	[FAULT_PAGE_ALLOC]		= "page alloc",
51 	[FAULT_PAGE_GET]		= "page get",
52 	[FAULT_ALLOC_BIO]		= "alloc bio(obsolete)",
53 	[FAULT_ALLOC_NID]		= "alloc nid",
54 	[FAULT_ORPHAN]			= "orphan",
55 	[FAULT_BLOCK]			= "no more block",
56 	[FAULT_DIR_DEPTH]		= "too big dir depth",
57 	[FAULT_EVICT_INODE]		= "evict_inode fail",
58 	[FAULT_TRUNCATE]		= "truncate fail",
59 	[FAULT_READ_IO]			= "read IO error",
60 	[FAULT_CHECKPOINT]		= "checkpoint error",
61 	[FAULT_DISCARD]			= "discard error",
62 	[FAULT_WRITE_IO]		= "write IO error",
63 	[FAULT_SLAB_ALLOC]		= "slab alloc",
64 	[FAULT_DQUOT_INIT]		= "dquot initialize",
65 	[FAULT_LOCK_OP]			= "lock_op",
66 	[FAULT_BLKADDR_VALIDITY]	= "invalid blkaddr",
67 	[FAULT_BLKADDR_CONSISTENCE]	= "inconsistent blkaddr",
68 	[FAULT_NO_SEGMENT]		= "no free segment",
69 	[FAULT_INCONSISTENT_FOOTER]	= "inconsistent footer",
70 	[FAULT_ATOMIC_TIMEOUT]		= "atomic timeout",
71 	[FAULT_VMALLOC]			= "vmalloc",
72 	[FAULT_LOCK_TIMEOUT]		= "lock timeout",
73 	[FAULT_SKIP_WRITE]		= "skip write",
74 };
75 
76 int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
77 				unsigned long type, enum fault_option fo)
78 {
79 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
80 
81 	if (fo & FAULT_ALL) {
82 		memset(ffi, 0, sizeof(struct f2fs_fault_info));
83 		return 0;
84 	}
85 
86 	if (fo & FAULT_RATE) {
87 		if (rate > INT_MAX)
88 			return -EINVAL;
89 		atomic_set(&ffi->inject_ops, 0);
90 		ffi->inject_rate = (int)rate;
91 		f2fs_info(sbi, "build fault injection rate: %lu", rate);
92 	}
93 
94 	if (fo & FAULT_TYPE) {
95 		if (type >= BIT(FAULT_MAX))
96 			return -EINVAL;
97 		ffi->inject_type = (unsigned int)type;
98 		f2fs_info(sbi, "build fault injection type: 0x%lx", type);
99 	}
100 
101 	if (fo & FAULT_TIMEOUT) {
102 		if (type >= TIMEOUT_TYPE_MAX)
103 			return -EINVAL;
104 		ffi->inject_lock_timeout = (unsigned int)type;
105 		f2fs_info(sbi, "build fault timeout injection type: 0x%lx", type);
106 	}
107 
108 	return 0;
109 }
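A minimal sketch, editorial and not part of this file, of how a caller could drive the helper above. It assumes the FAULT_RATE, FAULT_TYPE and FAULT_ALL bits of enum fault_option are independent flags, as the separate branches above imply; the function name example_toggle_faults is made up.

static int example_toggle_faults(struct f2fs_sb_info *sbi)
{
	int err;

	/* inject roughly one fault per 1000 injection points, for kmalloc only */
	err = f2fs_build_fault_attr(sbi, 1000, BIT(FAULT_KMALLOC),
				    FAULT_RATE | FAULT_TYPE);
	if (err)
		return err;

	/* FAULT_ALL clears fault_info and switches injection back off */
	return f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL);
}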
110 
111 static void inject_timeout(struct f2fs_sb_info *sbi)
112 {
113 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
114 	enum f2fs_timeout_type type = ffi->inject_lock_timeout;
115 	unsigned long start_time = jiffies;
116 	unsigned long timeout = HZ;
117 
118 	switch (type) {
119 	case TIMEOUT_TYPE_RUNNING:
120 		while (!time_after(jiffies, start_time + timeout)) {
121 			if (fatal_signal_pending(current))
122 				return;
123 			;
124 		}
125 		break;
126 	case TIMEOUT_TYPE_IO_SLEEP:
127 		f2fs_schedule_timeout_killable(timeout, true);
128 		break;
129 	case TIMEOUT_TYPE_NONIO_SLEEP:
130 		f2fs_schedule_timeout_killable(timeout, false);
131 		break;
132 	case TIMEOUT_TYPE_RUNNABLE:
133 		while (!time_after(jiffies, start_time + timeout)) {
134 			if (fatal_signal_pending(current))
135 				return;
136 			schedule();
137 		}
138 		break;
139 	default:
140 		return;
141 	}
142 }
143 
144 void f2fs_simulate_lock_timeout(struct f2fs_sb_info *sbi)
145 {
146 	struct f2fs_lock_context lc;
147 
148 	f2fs_lock_op(sbi, &lc);
149 	inject_timeout(sbi);
150 	f2fs_unlock_op(sbi, &lc);
151 }
152 #endif
153 
154 /* f2fs-wide shrinker description */
155 static struct shrinker *f2fs_shrinker_info;
156 
157 static int __init f2fs_init_shrinker(void)
158 {
159 	f2fs_shrinker_info = shrinker_alloc(0, "f2fs-shrinker");
160 	if (!f2fs_shrinker_info)
161 		return -ENOMEM;
162 
163 	f2fs_shrinker_info->count_objects = f2fs_shrink_count;
164 	f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;
165 
166 	shrinker_register(f2fs_shrinker_info);
167 
168 	return 0;
169 }
170 
171 static void f2fs_exit_shrinker(void)
172 {
173 	shrinker_free(f2fs_shrinker_info);
174 }
175 
176 enum {
177 	Opt_gc_background,
178 	Opt_disable_roll_forward,
179 	Opt_norecovery,
180 	Opt_discard,
181 	Opt_noheap,
182 	Opt_heap,
183 	Opt_user_xattr,
184 	Opt_acl,
185 	Opt_active_logs,
186 	Opt_disable_ext_identify,
187 	Opt_inline_xattr,
188 	Opt_inline_xattr_size,
189 	Opt_inline_data,
190 	Opt_inline_dentry,
191 	Opt_flush_merge,
192 	Opt_barrier,
193 	Opt_fastboot,
194 	Opt_extent_cache,
195 	Opt_data_flush,
196 	Opt_reserve_root,
197 	Opt_reserve_node,
198 	Opt_resgid,
199 	Opt_resuid,
200 	Opt_mode,
201 	Opt_fault_injection,
202 	Opt_fault_type,
203 	Opt_lazytime,
204 	Opt_quota,
205 	Opt_usrquota,
206 	Opt_grpquota,
207 	Opt_prjquota,
208 	Opt_usrjquota,
209 	Opt_grpjquota,
210 	Opt_prjjquota,
211 	Opt_alloc,
212 	Opt_fsync,
213 	Opt_test_dummy_encryption,
214 	Opt_inlinecrypt,
215 	Opt_checkpoint_disable,
216 	Opt_checkpoint_disable_cap,
217 	Opt_checkpoint_disable_cap_perc,
218 	Opt_checkpoint_enable,
219 	Opt_checkpoint_merge,
220 	Opt_compress_algorithm,
221 	Opt_compress_log_size,
222 	Opt_nocompress_extension,
223 	Opt_compress_extension,
224 	Opt_compress_chksum,
225 	Opt_compress_mode,
226 	Opt_compress_cache,
227 	Opt_atgc,
228 	Opt_gc_merge,
229 	Opt_discard_unit,
230 	Opt_memory_mode,
231 	Opt_age_extent_cache,
232 	Opt_errors,
233 	Opt_nat_bits,
234 	Opt_jqfmt,
235 	Opt_checkpoint,
236 	Opt_lookup_mode,
237 	Opt_err,
238 };
239 
240 static const struct constant_table f2fs_param_background_gc[] = {
241 	{"on",		BGGC_MODE_ON},
242 	{"off",		BGGC_MODE_OFF},
243 	{"sync",	BGGC_MODE_SYNC},
244 	{}
245 };
246 
247 static const struct constant_table f2fs_param_mode[] = {
248 	{"adaptive",		FS_MODE_ADAPTIVE},
249 	{"lfs",			FS_MODE_LFS},
250 	{"fragment:segment",	FS_MODE_FRAGMENT_SEG},
251 	{"fragment:block",	FS_MODE_FRAGMENT_BLK},
252 	{}
253 };
254 
255 static const struct constant_table f2fs_param_jqfmt[] = {
256 	{"vfsold",	QFMT_VFS_OLD},
257 	{"vfsv0",	QFMT_VFS_V0},
258 	{"vfsv1",	QFMT_VFS_V1},
259 	{}
260 };
261 
262 static const struct constant_table f2fs_param_alloc_mode[] = {
263 	{"default",	ALLOC_MODE_DEFAULT},
264 	{"reuse",	ALLOC_MODE_REUSE},
265 	{}
266 };
267 static const struct constant_table f2fs_param_fsync_mode[] = {
268 	{"posix",	FSYNC_MODE_POSIX},
269 	{"strict",	FSYNC_MODE_STRICT},
270 	{"nobarrier",	FSYNC_MODE_NOBARRIER},
271 	{}
272 };
273 
274 static const struct constant_table f2fs_param_compress_mode[] = {
275 	{"fs",		COMPR_MODE_FS},
276 	{"user",	COMPR_MODE_USER},
277 	{}
278 };
279 
280 static const struct constant_table f2fs_param_discard_unit[] = {
281 	{"block",	DISCARD_UNIT_BLOCK},
282 	{"segment",	DISCARD_UNIT_SEGMENT},
283 	{"section",	DISCARD_UNIT_SECTION},
284 	{}
285 };
286 
287 static const struct constant_table f2fs_param_memory_mode[] = {
288 	{"normal",	MEMORY_MODE_NORMAL},
289 	{"low",		MEMORY_MODE_LOW},
290 	{}
291 };
292 
293 static const struct constant_table f2fs_param_errors[] = {
294 	{"remount-ro",	MOUNT_ERRORS_READONLY},
295 	{"continue",	MOUNT_ERRORS_CONTINUE},
296 	{"panic",	MOUNT_ERRORS_PANIC},
297 	{}
298 };
299 
300 static const struct constant_table f2fs_param_lookup_mode[] = {
301 	{"perf",	LOOKUP_PERF},
302 	{"compat",	LOOKUP_COMPAT},
303 	{"auto",	LOOKUP_AUTO},
304 	{}
305 };
306 
307 static const struct fs_parameter_spec f2fs_param_specs[] = {
308 	fsparam_enum("background_gc", Opt_gc_background, f2fs_param_background_gc),
309 	fsparam_flag("disable_roll_forward", Opt_disable_roll_forward),
310 	fsparam_flag("norecovery", Opt_norecovery),
311 	fsparam_flag_no("discard", Opt_discard),
312 	fsparam_flag("no_heap", Opt_noheap),
313 	fsparam_flag("heap", Opt_heap),
314 	fsparam_flag_no("user_xattr", Opt_user_xattr),
315 	fsparam_flag_no("acl", Opt_acl),
316 	fsparam_s32("active_logs", Opt_active_logs),
317 	fsparam_flag("disable_ext_identify", Opt_disable_ext_identify),
318 	fsparam_flag_no("inline_xattr", Opt_inline_xattr),
319 	fsparam_s32("inline_xattr_size", Opt_inline_xattr_size),
320 	fsparam_flag_no("inline_data", Opt_inline_data),
321 	fsparam_flag_no("inline_dentry", Opt_inline_dentry),
322 	fsparam_flag_no("flush_merge", Opt_flush_merge),
323 	fsparam_flag_no("barrier", Opt_barrier),
324 	fsparam_flag("fastboot", Opt_fastboot),
325 	fsparam_flag_no("extent_cache", Opt_extent_cache),
326 	fsparam_flag("data_flush", Opt_data_flush),
327 	fsparam_u32("reserve_root", Opt_reserve_root),
328 	fsparam_u32("reserve_node", Opt_reserve_node),
329 	fsparam_gid("resgid", Opt_resgid),
330 	fsparam_uid("resuid", Opt_resuid),
331 	fsparam_enum("mode", Opt_mode, f2fs_param_mode),
332 	fsparam_s32("fault_injection", Opt_fault_injection),
333 	fsparam_u32("fault_type", Opt_fault_type),
334 	fsparam_flag_no("lazytime", Opt_lazytime),
335 	fsparam_flag_no("quota", Opt_quota),
336 	fsparam_flag("usrquota", Opt_usrquota),
337 	fsparam_flag("grpquota", Opt_grpquota),
338 	fsparam_flag("prjquota", Opt_prjquota),
339 	fsparam_string_empty("usrjquota", Opt_usrjquota),
340 	fsparam_string_empty("grpjquota", Opt_grpjquota),
341 	fsparam_string_empty("prjjquota", Opt_prjjquota),
342 	fsparam_flag("nat_bits", Opt_nat_bits),
343 	fsparam_enum("jqfmt", Opt_jqfmt, f2fs_param_jqfmt),
344 	fsparam_enum("alloc_mode", Opt_alloc, f2fs_param_alloc_mode),
345 	fsparam_enum("fsync_mode", Opt_fsync, f2fs_param_fsync_mode),
346 	fsparam_string("test_dummy_encryption", Opt_test_dummy_encryption),
347 	fsparam_flag("test_dummy_encryption", Opt_test_dummy_encryption),
348 	fsparam_flag("inlinecrypt", Opt_inlinecrypt),
349 	fsparam_string("checkpoint", Opt_checkpoint),
350 	fsparam_flag_no("checkpoint_merge", Opt_checkpoint_merge),
351 	fsparam_string("compress_algorithm", Opt_compress_algorithm),
352 	fsparam_u32("compress_log_size", Opt_compress_log_size),
353 	fsparam_string("compress_extension", Opt_compress_extension),
354 	fsparam_string("nocompress_extension", Opt_nocompress_extension),
355 	fsparam_flag("compress_chksum", Opt_compress_chksum),
356 	fsparam_enum("compress_mode", Opt_compress_mode, f2fs_param_compress_mode),
357 	fsparam_flag("compress_cache", Opt_compress_cache),
358 	fsparam_flag("atgc", Opt_atgc),
359 	fsparam_flag_no("gc_merge", Opt_gc_merge),
360 	fsparam_enum("discard_unit", Opt_discard_unit, f2fs_param_discard_unit),
361 	fsparam_enum("memory", Opt_memory_mode, f2fs_param_memory_mode),
362 	fsparam_flag("age_extent_cache", Opt_age_extent_cache),
363 	fsparam_enum("errors", Opt_errors, f2fs_param_errors),
364 	fsparam_enum("lookup_mode", Opt_lookup_mode, f2fs_param_lookup_mode),
365 	{}
366 };
367 
368 /* Resort to a match_table for this interestingly formatted option */
369 static match_table_t f2fs_checkpoint_tokens = {
370 	{Opt_checkpoint_disable, "disable"},
371 	{Opt_checkpoint_disable_cap, "disable:%u"},
372 	{Opt_checkpoint_disable_cap_perc, "disable:%u%%"},
373 	{Opt_checkpoint_enable, "enable"},
374 	{Opt_err, NULL},
375 };
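For reference, these patterns correspond to mount strings such as the following (the numeric values are invented for illustration); the handling itself lives in the Opt_checkpoint case of f2fs_parse_param() below:

	checkpoint=enable            resume checkpointing and clear both unusable caps
	checkpoint=disable           stop checkpointing
	checkpoint=disable:131072    tolerate up to 131072 unusable blocks
	checkpoint=disable:10%       cap unusable space at 10% of user blocks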
376 
377 #define F2FS_SPEC_background_gc			(1 << 0)
378 #define F2FS_SPEC_inline_xattr_size		(1 << 1)
379 #define F2FS_SPEC_active_logs			(1 << 2)
380 #define F2FS_SPEC_reserve_root			(1 << 3)
381 #define F2FS_SPEC_resgid			(1 << 4)
382 #define F2FS_SPEC_resuid			(1 << 5)
383 #define F2FS_SPEC_mode				(1 << 6)
384 #define F2FS_SPEC_fault_injection		(1 << 7)
385 #define F2FS_SPEC_fault_type			(1 << 8)
386 #define F2FS_SPEC_jqfmt				(1 << 9)
387 #define F2FS_SPEC_alloc_mode			(1 << 10)
388 #define F2FS_SPEC_fsync_mode			(1 << 11)
389 #define F2FS_SPEC_checkpoint_disable_cap	(1 << 12)
390 #define F2FS_SPEC_checkpoint_disable_cap_perc	(1 << 13)
391 #define F2FS_SPEC_compress_level		(1 << 14)
392 #define F2FS_SPEC_compress_algorithm		(1 << 15)
393 #define F2FS_SPEC_compress_log_size		(1 << 16)
394 #define F2FS_SPEC_compress_extension		(1 << 17)
395 #define F2FS_SPEC_nocompress_extension		(1 << 18)
396 #define F2FS_SPEC_compress_chksum		(1 << 19)
397 #define F2FS_SPEC_compress_mode			(1 << 20)
398 #define F2FS_SPEC_discard_unit			(1 << 21)
399 #define F2FS_SPEC_memory_mode			(1 << 22)
400 #define F2FS_SPEC_errors			(1 << 23)
401 #define F2FS_SPEC_lookup_mode			(1 << 24)
402 #define F2FS_SPEC_reserve_node			(1 << 25)
403 
404 struct f2fs_fs_context {
405 	struct f2fs_mount_info info;
406 	unsigned long long opt_mask;	/* Bits changed */
407 	unsigned int	spec_mask;
408 	unsigned short	qname_mask;
409 };
410 
411 #define F2FS_CTX_INFO(ctx)	((ctx)->info)
412 
413 static inline void ctx_set_opt(struct f2fs_fs_context *ctx,
414 			       enum f2fs_mount_opt flag)
415 {
416 	ctx->info.opt |= BIT(flag);
417 	ctx->opt_mask |= BIT(flag);
418 }
419 
420 static inline void ctx_clear_opt(struct f2fs_fs_context *ctx,
421 				 enum f2fs_mount_opt flag)
422 {
423 	ctx->info.opt &= ~BIT(flag);
424 	ctx->opt_mask |= BIT(flag);
425 }
426 
427 static inline bool ctx_test_opt(struct f2fs_fs_context *ctx,
428 				enum f2fs_mount_opt flag)
429 {
430 	return ctx->info.opt & BIT(flag);
431 }
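These helpers record both the desired value (info.opt) and the fact that the caller touched that flag at all (opt_mask); the second mask is what lets a remount leave unmentioned options untouched. The apply step, taken verbatim from f2fs_apply_options() later in this file, reduces to:

	/* keep every bit the user did not mention, then overlay the new values */
	F2FS_OPTION(sbi).opt &= ~ctx->opt_mask;
	F2FS_OPTION(sbi).opt |= F2FS_CTX_INFO(ctx).opt;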
432 
433 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
434 					const char *fmt, ...)
435 {
436 	struct va_format vaf;
437 	va_list args;
438 	int level;
439 
440 	va_start(args, fmt);
441 
442 	level = printk_get_level(fmt);
443 	vaf.fmt = printk_skip_level(fmt);
444 	vaf.va = &args;
445 	if (limit_rate)
446 		if (sbi)
447 			printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
448 				KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
449 		else
450 			printk_ratelimited("%c%cF2FS-fs: %pV\n",
451 				KERN_SOH_ASCII, level, &vaf);
452 	else
453 		if (sbi)
454 			printk("%c%cF2FS-fs (%s): %pV\n",
455 				KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
456 		else
457 			printk("%c%cF2FS-fs: %pV\n",
458 				KERN_SOH_ASCII, level, &vaf);
459 
460 	va_end(args);
461 }
462 
463 #if IS_ENABLED(CONFIG_UNICODE)
464 static const struct f2fs_sb_encodings {
465 	__u16 magic;
466 	char *name;
467 	unsigned int version;
468 } f2fs_sb_encoding_map[] = {
469 	{F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
470 };
471 
472 static const struct f2fs_sb_encodings *
473 f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
474 {
475 	__u16 magic = le16_to_cpu(sb->s_encoding);
476 	int i;
477 
478 	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
479 		if (magic == f2fs_sb_encoding_map[i].magic)
480 			return &f2fs_sb_encoding_map[i];
481 
482 	return NULL;
483 }
484 
485 struct kmem_cache *f2fs_cf_name_slab;
486 static int __init f2fs_create_casefold_cache(void)
487 {
488 	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
489 						   F2FS_NAME_LEN);
490 	return f2fs_cf_name_slab ? 0 : -ENOMEM;
491 }
492 
493 static void f2fs_destroy_casefold_cache(void)
494 {
495 	kmem_cache_destroy(f2fs_cf_name_slab);
496 }
497 #else
498 static int __init f2fs_create_casefold_cache(void) { return 0; }
499 static void f2fs_destroy_casefold_cache(void) { }
500 #endif
501 
502 static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
503 {
504 	block_t block_limit = min((sbi->user_block_count >> 3),
505 			sbi->user_block_count - sbi->reserved_blocks);
506 	block_t node_limit = sbi->total_node_count >> 3;
507 
508 	/* limit is 12.5% */
509 	if (test_opt(sbi, RESERVE_ROOT) &&
510 			F2FS_OPTION(sbi).root_reserved_blocks > block_limit) {
511 		F2FS_OPTION(sbi).root_reserved_blocks = block_limit;
512 		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
513 			  F2FS_OPTION(sbi).root_reserved_blocks);
514 	}
515 	if (test_opt(sbi, RESERVE_NODE) &&
516 			F2FS_OPTION(sbi).root_reserved_nodes > node_limit) {
517 		F2FS_OPTION(sbi).root_reserved_nodes = node_limit;
518 		f2fs_info(sbi, "Reduce reserved nodes for root = %u",
519 			  F2FS_OPTION(sbi).root_reserved_nodes);
520 	}
521 	if (!test_opt(sbi, RESERVE_ROOT) && !test_opt(sbi, RESERVE_NODE) &&
522 		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
523 				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
524 		!gid_eq(F2FS_OPTION(sbi).s_resgid,
525 				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
526 		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root"
527 				" and reserve_node",
528 			  from_kuid_munged(&init_user_ns,
529 					   F2FS_OPTION(sbi).s_resuid),
530 			  from_kgid_munged(&init_user_ns,
531 					   F2FS_OPTION(sbi).s_resgid));
532 }
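As a worked example of the 12.5% clamp above (all numbers invented): with user_block_count = 8,000,000 and reserved_blocks = 0, block_limit = min(8,000,000 >> 3, 8,000,000) = 1,000,000, so any larger reserve_root= value is quietly reduced to one million blocks; root_reserved_nodes is clamped the same way against total_node_count >> 3.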
533 
534 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
535 {
536 	if (!F2FS_OPTION(sbi).unusable_cap_perc)
537 		return;
538 
539 	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
540 		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
541 	else
542 		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
543 					F2FS_OPTION(sbi).unusable_cap_perc;
544 
545 	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
546 			F2FS_OPTION(sbi).unusable_cap,
547 			F2FS_OPTION(sbi).unusable_cap_perc);
548 }
549 
550 static void init_once(void *foo)
551 {
552 	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
553 
554 	inode_init_once(&fi->vfs_inode);
555 #ifdef CONFIG_FS_ENCRYPTION
556 	fi->i_crypt_info = NULL;
557 #endif
558 }
559 
560 #ifdef CONFIG_QUOTA
561 static const char * const quotatypes[] = INITQFNAMES;
562 #define QTYPE2NAME(t) (quotatypes[t])
563 /*
564  * Note the name of the specified quota file.
565  */
566 static int f2fs_note_qf_name(struct fs_context *fc, int qtype,
567 			     struct fs_parameter *param)
568 {
569 	struct f2fs_fs_context *ctx = fc->fs_private;
570 	char *qname;
571 
572 	if (param->size < 1) {
573 		f2fs_err(NULL, "Missing quota name");
574 		return -EINVAL;
575 	}
576 	if (strchr(param->string, '/')) {
577 		f2fs_err(NULL, "quotafile must be on filesystem root");
578 		return -EINVAL;
579 	}
580 	if (ctx->info.s_qf_names[qtype]) {
581 		if (strcmp(ctx->info.s_qf_names[qtype], param->string) != 0) {
582 			f2fs_err(NULL, "Quota file already specified");
583 			return -EINVAL;
584 		}
585 		return 0;
586 	}
587 
588 	qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
589 	if (!qname) {
590 		f2fs_err(NULL, "Not enough memory for storing quotafile name");
591 		return -ENOMEM;
592 	}
593 	F2FS_CTX_INFO(ctx).s_qf_names[qtype] = qname;
594 	ctx->qname_mask |= 1 << qtype;
595 	return 0;
596 }
597 
598 /*
599  * Clear the name of the specified quota file.
600  */
601 static int f2fs_unnote_qf_name(struct fs_context *fc, int qtype)
602 {
603 	struct f2fs_fs_context *ctx = fc->fs_private;
604 
605 	kfree(ctx->info.s_qf_names[qtype]);
606 	ctx->info.s_qf_names[qtype] = NULL;
607 	ctx->qname_mask |= 1 << qtype;
608 	return 0;
609 }
610 
611 static void f2fs_unnote_qf_name_all(struct fs_context *fc)
612 {
613 	int i;
614 
615 	for (i = 0; i < MAXQUOTAS; i++)
616 		f2fs_unnote_qf_name(fc, i);
617 }
618 #endif
619 
620 static int f2fs_parse_test_dummy_encryption(const struct fs_parameter *param,
621 					    struct f2fs_fs_context *ctx)
622 {
623 	int err;
624 
625 	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
626 		f2fs_warn(NULL, "test_dummy_encryption option not supported");
627 		return -EINVAL;
628 	}
629 	err = fscrypt_parse_test_dummy_encryption(param,
630 					&ctx->info.dummy_enc_policy);
631 	if (err) {
632 		if (err == -EINVAL)
633 			f2fs_warn(NULL, "Value of option \"%s\" is unrecognized",
634 				  param->key);
635 		else if (err == -EEXIST)
636 			f2fs_warn(NULL, "Conflicting test_dummy_encryption options");
637 		else
638 			f2fs_warn(NULL, "Error processing option \"%s\" [%d]",
639 				  param->key, err);
640 		return -EINVAL;
641 	}
642 	return 0;
643 }
644 
645 #ifdef CONFIG_F2FS_FS_COMPRESSION
646 static bool is_compress_extension_exist(struct f2fs_mount_info *info,
647 					const char *new_ext, bool is_ext)
648 {
649 	unsigned char (*ext)[F2FS_EXTENSION_LEN];
650 	int ext_cnt;
651 	int i;
652 
653 	if (is_ext) {
654 		ext = info->extensions;
655 		ext_cnt = info->compress_ext_cnt;
656 	} else {
657 		ext = info->noextensions;
658 		ext_cnt = info->nocompress_ext_cnt;
659 	}
660 
661 	for (i = 0; i < ext_cnt; i++) {
662 		if (!strcasecmp(new_ext, ext[i]))
663 			return true;
664 	}
665 
666 	return false;
667 }
668 
669 /*
670  * 1. The same extension name cannot appear in both the compress and non-compress extension
671  * lists at the same time.
672  * 2. If the compress extension specifies all files, the types specified by the non-compress
673  * extension will be treated as special cases and will not be compressed.
674  * 3. Don't allow the non-compress extension to specify all files.
675  */
676 static int f2fs_test_compress_extension(unsigned char (*noext)[F2FS_EXTENSION_LEN],
677 					int noext_cnt,
678 					unsigned char (*ext)[F2FS_EXTENSION_LEN],
679 					int ext_cnt)
680 {
681 	int index = 0, no_index = 0;
682 
683 	if (!noext_cnt)
684 		return 0;
685 
686 	for (no_index = 0; no_index < noext_cnt; no_index++) {
687 		if (strlen(noext[no_index]) == 0)
688 			continue;
689 		if (!strcasecmp("*", noext[no_index])) {
690 			f2fs_info(NULL, "Don't allow the nocompress extension specifies all files");
691 			return -EINVAL;
692 		}
693 		for (index = 0; index < ext_cnt; index++) {
694 			if (strlen(ext[index]) == 0)
695 				continue;
696 			if (!strcasecmp(ext[index], noext[no_index])) {
697 				f2fs_info(NULL, "Don't allow the same extension %s appear in both compress and nocompress extension",
698 						ext[index]);
699 				return -EINVAL;
700 			}
701 		}
702 	}
703 	return 0;
704 }
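Read together with the parser below, the three rules above accept or reject combinations such as these (the extension names are only examples):

	compress_extension=*,nocompress_extension=log    accepted: compress everything except *.log (rule 2)
	compress_extension=db,nocompress_extension=db    rejected: same name on both lists (rule 1)
	nocompress_extension=*                           rejected: '*' may not cover all files (rule 3)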
705 
706 #ifdef CONFIG_F2FS_FS_LZ4
707 static int f2fs_set_lz4hc_level(struct f2fs_fs_context *ctx, const char *str)
708 {
709 #ifdef CONFIG_F2FS_FS_LZ4HC
710 	unsigned int level;
711 
712 	if (strlen(str) == 3) {
713 		F2FS_CTX_INFO(ctx).compress_level = 0;
714 		ctx->spec_mask |= F2FS_SPEC_compress_level;
715 		return 0;
716 	}
717 
718 	str += 3;
719 
720 	if (str[0] != ':') {
721 		f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>");
722 		return -EINVAL;
723 	}
724 	if (kstrtouint(str + 1, 10, &level))
725 		return -EINVAL;
726 
727 	if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
728 		f2fs_info(NULL, "invalid lz4hc compress level: %d", level);
729 		return -EINVAL;
730 	}
731 
732 	F2FS_CTX_INFO(ctx).compress_level = level;
733 	ctx->spec_mask |= F2FS_SPEC_compress_level;
734 	return 0;
735 #else
736 	if (strlen(str) == 3) {
737 		F2FS_CTX_INFO(ctx).compress_level = 0;
738 		ctx->spec_mask |= F2FS_SPEC_compress_level;
739 		return 0;
740 	}
741 	f2fs_info(NULL, "kernel doesn't support lz4hc compression");
742 	return -EINVAL;
743 #endif
744 }
745 #endif
746 
747 #ifdef CONFIG_F2FS_FS_ZSTD
748 static int f2fs_set_zstd_level(struct f2fs_fs_context *ctx, const char *str)
749 {
750 	int level;
751 	int len = 4;
752 
753 	if (strlen(str) == len) {
754 		F2FS_CTX_INFO(ctx).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
755 		ctx->spec_mask |= F2FS_SPEC_compress_level;
756 		return 0;
757 	}
758 
759 	str += len;
760 
761 	if (str[0] != ':') {
762 		f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>");
763 		return -EINVAL;
764 	}
765 	if (kstrtoint(str + 1, 10, &level))
766 		return -EINVAL;
767 
768 	/* f2fs does not support negative compress levels for now */
769 	if (level < 0) {
770 		f2fs_info(NULL, "do not support negative compress level: %d", level);
771 		return -ERANGE;
772 	}
773 
774 	if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
775 		f2fs_info(NULL, "invalid zstd compress level: %d", level);
776 		return -EINVAL;
777 	}
778 
779 	F2FS_CTX_INFO(ctx).compress_level = level;
780 	ctx->spec_mask |= F2FS_SPEC_compress_level;
781 	return 0;
782 }
783 #endif
784 #endif
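Assuming the corresponding CONFIG_F2FS_FS_* options are built in, the level parsers above accept compress_algorithm= strings in these shapes (the levels shown are arbitrary examples):

	compress_algorithm=lz4       plain lz4, level 0
	compress_algorithm=lz4:9     lz4hc level, requires CONFIG_F2FS_FS_LZ4HC
	compress_algorithm=zstd      F2FS_ZSTD_DEFAULT_CLEVEL
	compress_algorithm=zstd:6    explicit, non-negative zstd level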
785 
786 static int f2fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
787 {
788 	struct f2fs_fs_context *ctx = fc->fs_private;
789 #ifdef CONFIG_F2FS_FS_COMPRESSION
790 	unsigned char (*ext)[F2FS_EXTENSION_LEN];
791 	unsigned char (*noext)[F2FS_EXTENSION_LEN];
792 	int ext_cnt, noext_cnt;
793 	char *name;
794 #endif
795 	substring_t args[MAX_OPT_ARGS];
796 	struct fs_parse_result result;
797 	int token, ret, arg;
798 
799 	token = fs_parse(fc, f2fs_param_specs, param, &result);
800 	if (token < 0)
801 		return token;
802 
803 	switch (token) {
804 	case Opt_gc_background:
805 		F2FS_CTX_INFO(ctx).bggc_mode = result.uint_32;
806 		ctx->spec_mask |= F2FS_SPEC_background_gc;
807 		break;
808 	case Opt_disable_roll_forward:
809 		ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_ROLL_FORWARD);
810 		break;
811 	case Opt_norecovery:
812 		/* requires ro mount, checked in f2fs_validate_options */
813 		ctx_set_opt(ctx, F2FS_MOUNT_NORECOVERY);
814 		break;
815 	case Opt_discard:
816 		if (result.negated)
817 			ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
818 		else
819 			ctx_set_opt(ctx, F2FS_MOUNT_DISCARD);
820 		break;
821 	case Opt_noheap:
822 	case Opt_heap:
823 		f2fs_warn(NULL, "heap/no_heap options were deprecated");
824 		break;
825 #ifdef CONFIG_F2FS_FS_XATTR
826 	case Opt_user_xattr:
827 		if (result.negated)
828 			ctx_clear_opt(ctx, F2FS_MOUNT_XATTR_USER);
829 		else
830 			ctx_set_opt(ctx, F2FS_MOUNT_XATTR_USER);
831 		break;
832 	case Opt_inline_xattr:
833 		if (result.negated)
834 			ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
835 		else
836 			ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
837 		break;
838 	case Opt_inline_xattr_size:
839 		if (result.int_32 < MIN_INLINE_XATTR_SIZE ||
840 			result.int_32 > MAX_INLINE_XATTR_SIZE) {
841 			f2fs_err(NULL, "inline xattr size is out of range: %u ~ %u",
842 				 (u32)MIN_INLINE_XATTR_SIZE, (u32)MAX_INLINE_XATTR_SIZE);
843 			return -EINVAL;
844 		}
845 		ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE);
846 		F2FS_CTX_INFO(ctx).inline_xattr_size = result.int_32;
847 		ctx->spec_mask |= F2FS_SPEC_inline_xattr_size;
848 		break;
849 #else
850 	case Opt_user_xattr:
851 	case Opt_inline_xattr:
852 	case Opt_inline_xattr_size:
853 		f2fs_info(NULL, "%s options not supported", param->key);
854 		break;
855 #endif
856 #ifdef CONFIG_F2FS_FS_POSIX_ACL
857 	case Opt_acl:
858 		if (result.negated)
859 			ctx_clear_opt(ctx, F2FS_MOUNT_POSIX_ACL);
860 		else
861 			ctx_set_opt(ctx, F2FS_MOUNT_POSIX_ACL);
862 		break;
863 #else
864 	case Opt_acl:
865 		f2fs_info(NULL, "%s options not supported", param->key);
866 		break;
867 #endif
868 	case Opt_active_logs:
869 		if (result.int_32 != 2 && result.int_32 != 4 &&
870 			result.int_32 != NR_CURSEG_PERSIST_TYPE)
871 			return -EINVAL;
872 		ctx->spec_mask |= F2FS_SPEC_active_logs;
873 		F2FS_CTX_INFO(ctx).active_logs = result.int_32;
874 		break;
875 	case Opt_disable_ext_identify:
876 		ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_EXT_IDENTIFY);
877 		break;
878 	case Opt_inline_data:
879 		if (result.negated)
880 			ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DATA);
881 		else
882 			ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DATA);
883 		break;
884 	case Opt_inline_dentry:
885 		if (result.negated)
886 			ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
887 		else
888 			ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
889 		break;
890 	case Opt_flush_merge:
891 		if (result.negated)
892 			ctx_clear_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
893 		else
894 			ctx_set_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
895 		break;
896 	case Opt_barrier:
897 		if (result.negated)
898 			ctx_set_opt(ctx, F2FS_MOUNT_NOBARRIER);
899 		else
900 			ctx_clear_opt(ctx, F2FS_MOUNT_NOBARRIER);
901 		break;
902 	case Opt_fastboot:
903 		ctx_set_opt(ctx, F2FS_MOUNT_FASTBOOT);
904 		break;
905 	case Opt_extent_cache:
906 		if (result.negated)
907 			ctx_clear_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
908 		else
909 			ctx_set_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
910 		break;
911 	case Opt_data_flush:
912 		ctx_set_opt(ctx, F2FS_MOUNT_DATA_FLUSH);
913 		break;
914 	case Opt_reserve_root:
915 		ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
916 		F2FS_CTX_INFO(ctx).root_reserved_blocks = result.uint_32;
917 		ctx->spec_mask |= F2FS_SPEC_reserve_root;
918 		break;
919 	case Opt_reserve_node:
920 		ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
921 		F2FS_CTX_INFO(ctx).root_reserved_nodes = result.uint_32;
922 		ctx->spec_mask |= F2FS_SPEC_reserve_node;
923 		break;
924 	case Opt_resuid:
925 		F2FS_CTX_INFO(ctx).s_resuid = result.uid;
926 		ctx->spec_mask |= F2FS_SPEC_resuid;
927 		break;
928 	case Opt_resgid:
929 		F2FS_CTX_INFO(ctx).s_resgid = result.gid;
930 		ctx->spec_mask |= F2FS_SPEC_resgid;
931 		break;
932 	case Opt_mode:
933 		F2FS_CTX_INFO(ctx).fs_mode = result.uint_32;
934 		ctx->spec_mask |= F2FS_SPEC_mode;
935 		break;
936 #ifdef CONFIG_F2FS_FAULT_INJECTION
937 	case Opt_fault_injection:
938 		F2FS_CTX_INFO(ctx).fault_info.inject_rate = result.int_32;
939 		ctx->spec_mask |= F2FS_SPEC_fault_injection;
940 		ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
941 		break;
942 
943 	case Opt_fault_type:
944 		if (result.uint_32 > BIT(FAULT_MAX))
945 			return -EINVAL;
946 		F2FS_CTX_INFO(ctx).fault_info.inject_type = result.uint_32;
947 		ctx->spec_mask |= F2FS_SPEC_fault_type;
948 		ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
949 		break;
950 #else
951 	case Opt_fault_injection:
952 	case Opt_fault_type:
953 		f2fs_info(NULL, "%s options not supported", param->key);
954 		break;
955 #endif
956 	case Opt_lazytime:
957 		if (result.negated)
958 			ctx_clear_opt(ctx, F2FS_MOUNT_LAZYTIME);
959 		else
960 			ctx_set_opt(ctx, F2FS_MOUNT_LAZYTIME);
961 		break;
962 #ifdef CONFIG_QUOTA
963 	case Opt_quota:
964 		if (result.negated) {
965 			ctx_clear_opt(ctx, F2FS_MOUNT_QUOTA);
966 			ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
967 			ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
968 			ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
969 		} else
970 			ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
971 		break;
972 	case Opt_usrquota:
973 		ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
974 		break;
975 	case Opt_grpquota:
976 		ctx_set_opt(ctx, F2FS_MOUNT_GRPQUOTA);
977 		break;
978 	case Opt_prjquota:
979 		ctx_set_opt(ctx, F2FS_MOUNT_PRJQUOTA);
980 		break;
981 	case Opt_usrjquota:
982 		if (!*param->string)
983 			ret = f2fs_unnote_qf_name(fc, USRQUOTA);
984 		else
985 			ret = f2fs_note_qf_name(fc, USRQUOTA, param);
986 		if (ret)
987 			return ret;
988 		break;
989 	case Opt_grpjquota:
990 		if (!*param->string)
991 			ret = f2fs_unnote_qf_name(fc, GRPQUOTA);
992 		else
993 			ret = f2fs_note_qf_name(fc, GRPQUOTA, param);
994 		if (ret)
995 			return ret;
996 		break;
997 	case Opt_prjjquota:
998 		if (!*param->string)
999 			ret = f2fs_unnote_qf_name(fc, PRJQUOTA);
1000 		else
1001 			ret = f2fs_note_qf_name(fc, PRJQUOTA, param);
1002 		if (ret)
1003 			return ret;
1004 		break;
1005 	case Opt_jqfmt:
1006 		F2FS_CTX_INFO(ctx).s_jquota_fmt = result.int_32;
1007 		ctx->spec_mask |= F2FS_SPEC_jqfmt;
1008 		break;
1009 #else
1010 	case Opt_quota:
1011 	case Opt_usrquota:
1012 	case Opt_grpquota:
1013 	case Opt_prjquota:
1014 	case Opt_usrjquota:
1015 	case Opt_grpjquota:
1016 	case Opt_prjjquota:
1017 		f2fs_info(NULL, "quota operations not supported");
1018 		break;
1019 #endif
1020 	case Opt_alloc:
1021 		F2FS_CTX_INFO(ctx).alloc_mode = result.uint_32;
1022 		ctx->spec_mask |= F2FS_SPEC_alloc_mode;
1023 		break;
1024 	case Opt_fsync:
1025 		F2FS_CTX_INFO(ctx).fsync_mode = result.uint_32;
1026 		ctx->spec_mask |= F2FS_SPEC_fsync_mode;
1027 		break;
1028 	case Opt_test_dummy_encryption:
1029 		ret = f2fs_parse_test_dummy_encryption(param, ctx);
1030 		if (ret)
1031 			return ret;
1032 		break;
1033 	case Opt_inlinecrypt:
1034 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
1035 		ctx_set_opt(ctx, F2FS_MOUNT_INLINECRYPT);
1036 #else
1037 		f2fs_info(NULL, "inline encryption not supported");
1038 #endif
1039 		break;
1040 	case Opt_checkpoint:
1041 		/*
1042 		 * Initialize args struct so we know whether arg was
1043 		 * found; some options take optional arguments.
1044 		 */
1045 		args[0].from = args[0].to = NULL;
1046 		arg = 0;
1047 
1048 		/* revert to match_table for checkpoint= options */
1049 		token = match_token(param->string, f2fs_checkpoint_tokens, args);
1050 		switch (token) {
1051 		case Opt_checkpoint_disable_cap_perc:
1052 			if (args->from && match_int(args, &arg))
1053 				return -EINVAL;
1054 			if (arg < 0 || arg > 100)
1055 				return -EINVAL;
1056 			F2FS_CTX_INFO(ctx).unusable_cap_perc = arg;
1057 			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
1058 			ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
1059 			break;
1060 		case Opt_checkpoint_disable_cap:
1061 			if (args->from && match_int(args, &arg))
1062 				return -EINVAL;
1063 			F2FS_CTX_INFO(ctx).unusable_cap = arg;
1064 			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
1065 			ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
1066 			break;
1067 		case Opt_checkpoint_disable:
1068 			ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
1069 			break;
1070 		case Opt_checkpoint_enable:
1071 			F2FS_CTX_INFO(ctx).unusable_cap_perc = 0;
1072 			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
1073 			F2FS_CTX_INFO(ctx).unusable_cap = 0;
1074 			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
1075 			ctx_clear_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
1076 			break;
1077 		default:
1078 			return -EINVAL;
1079 		}
1080 		break;
1081 	case Opt_checkpoint_merge:
1082 		if (result.negated)
1083 			ctx_clear_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
1084 		else
1085 			ctx_set_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
1086 		break;
1087 #ifdef CONFIG_F2FS_FS_COMPRESSION
1088 	case Opt_compress_algorithm:
1089 		name = param->string;
1090 		if (!strcmp(name, "lzo")) {
1091 #ifdef CONFIG_F2FS_FS_LZO
1092 			F2FS_CTX_INFO(ctx).compress_level = 0;
1093 			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZO;
1094 			ctx->spec_mask |= F2FS_SPEC_compress_level;
1095 			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
1096 #else
1097 			f2fs_info(NULL, "kernel doesn't support lzo compression");
1098 #endif
1099 		} else if (!strncmp(name, "lz4", 3)) {
1100 #ifdef CONFIG_F2FS_FS_LZ4
1101 			ret = f2fs_set_lz4hc_level(ctx, name);
1102 			if (ret)
1103 				return -EINVAL;
1104 			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZ4;
1105 			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
1106 #else
1107 			f2fs_info(NULL, "kernel doesn't support lz4 compression");
1108 #endif
1109 		} else if (!strncmp(name, "zstd", 4)) {
1110 #ifdef CONFIG_F2FS_FS_ZSTD
1111 			ret = f2fs_set_zstd_level(ctx, name);
1112 			if (ret)
1113 				return -EINVAL;
1114 			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_ZSTD;
1115 			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
1116 #else
1117 			f2fs_info(NULL, "kernel doesn't support zstd compression");
1118 #endif
1119 		} else if (!strcmp(name, "lzo-rle")) {
1120 #ifdef CONFIG_F2FS_FS_LZORLE
1121 			F2FS_CTX_INFO(ctx).compress_level = 0;
1122 			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZORLE;
1123 			ctx->spec_mask |= F2FS_SPEC_compress_level;
1124 			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
1125 #else
1126 			f2fs_info(NULL, "kernel doesn't support lzorle compression");
1127 #endif
1128 		} else
1129 			return -EINVAL;
1130 		break;
1131 	case Opt_compress_log_size:
1132 		if (result.uint_32 < MIN_COMPRESS_LOG_SIZE ||
1133 		    result.uint_32 > MAX_COMPRESS_LOG_SIZE) {
1134 			f2fs_err(NULL,
1135 				"Compress cluster log size is out of range");
1136 			return -EINVAL;
1137 		}
1138 		F2FS_CTX_INFO(ctx).compress_log_size = result.uint_32;
1139 		ctx->spec_mask |= F2FS_SPEC_compress_log_size;
1140 		break;
1141 	case Opt_compress_extension:
1142 		name = param->string;
1143 		ext = F2FS_CTX_INFO(ctx).extensions;
1144 		ext_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
1145 
1146 		if (strlen(name) >= F2FS_EXTENSION_LEN ||
1147 		    ext_cnt >= COMPRESS_EXT_NUM) {
1148 			f2fs_err(NULL, "invalid extension length/number");
1149 			return -EINVAL;
1150 		}
1151 
1152 		if (is_compress_extension_exist(&ctx->info, name, true))
1153 			break;
1154 
1155 		ret = strscpy(ext[ext_cnt], name, F2FS_EXTENSION_LEN);
1156 		if (ret < 0)
1157 			return ret;
1158 		F2FS_CTX_INFO(ctx).compress_ext_cnt++;
1159 		ctx->spec_mask |= F2FS_SPEC_compress_extension;
1160 		break;
1161 	case Opt_nocompress_extension:
1162 		name = param->string;
1163 		noext = F2FS_CTX_INFO(ctx).noextensions;
1164 		noext_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
1165 
1166 		if (strlen(name) >= F2FS_EXTENSION_LEN ||
1167 			noext_cnt >= COMPRESS_EXT_NUM) {
1168 			f2fs_err(NULL, "invalid extension length/number");
1169 			return -EINVAL;
1170 		}
1171 
1172 		if (is_compress_extension_exist(&ctx->info, name, false))
1173 			break;
1174 
1175 		ret = strscpy(noext[noext_cnt], name, F2FS_EXTENSION_LEN);
1176 		if (ret < 0)
1177 			return ret;
1178 		F2FS_CTX_INFO(ctx).nocompress_ext_cnt++;
1179 		ctx->spec_mask |= F2FS_SPEC_nocompress_extension;
1180 		break;
1181 	case Opt_compress_chksum:
1182 		F2FS_CTX_INFO(ctx).compress_chksum = true;
1183 		ctx->spec_mask |= F2FS_SPEC_compress_chksum;
1184 		break;
1185 	case Opt_compress_mode:
1186 		F2FS_CTX_INFO(ctx).compress_mode = result.uint_32;
1187 		ctx->spec_mask |= F2FS_SPEC_compress_mode;
1188 		break;
1189 	case Opt_compress_cache:
1190 		ctx_set_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE);
1191 		break;
1192 #else
1193 	case Opt_compress_algorithm:
1194 	case Opt_compress_log_size:
1195 	case Opt_compress_extension:
1196 	case Opt_nocompress_extension:
1197 	case Opt_compress_chksum:
1198 	case Opt_compress_mode:
1199 	case Opt_compress_cache:
1200 		f2fs_info(NULL, "compression options not supported");
1201 		break;
1202 #endif
1203 	case Opt_atgc:
1204 		ctx_set_opt(ctx, F2FS_MOUNT_ATGC);
1205 		break;
1206 	case Opt_gc_merge:
1207 		if (result.negated)
1208 			ctx_clear_opt(ctx, F2FS_MOUNT_GC_MERGE);
1209 		else
1210 			ctx_set_opt(ctx, F2FS_MOUNT_GC_MERGE);
1211 		break;
1212 	case Opt_discard_unit:
1213 		F2FS_CTX_INFO(ctx).discard_unit = result.uint_32;
1214 		ctx->spec_mask |= F2FS_SPEC_discard_unit;
1215 		break;
1216 	case Opt_memory_mode:
1217 		F2FS_CTX_INFO(ctx).memory_mode = result.uint_32;
1218 		ctx->spec_mask |= F2FS_SPEC_memory_mode;
1219 		break;
1220 	case Opt_age_extent_cache:
1221 		ctx_set_opt(ctx, F2FS_MOUNT_AGE_EXTENT_CACHE);
1222 		break;
1223 	case Opt_errors:
1224 		F2FS_CTX_INFO(ctx).errors = result.uint_32;
1225 		ctx->spec_mask |= F2FS_SPEC_errors;
1226 		break;
1227 	case Opt_nat_bits:
1228 		ctx_set_opt(ctx, F2FS_MOUNT_NAT_BITS);
1229 		break;
1230 	case Opt_lookup_mode:
1231 		F2FS_CTX_INFO(ctx).lookup_mode = result.uint_32;
1232 		ctx->spec_mask |= F2FS_SPEC_lookup_mode;
1233 		break;
1234 	}
1235 	return 0;
1236 }
1237 
1238 /*
1239  * Check quota settings consistency.
1240  */
1241 static int f2fs_check_quota_consistency(struct fs_context *fc,
1242 					struct super_block *sb)
1243 {
1244 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1245 #ifdef CONFIG_QUOTA
1246 	struct f2fs_fs_context *ctx = fc->fs_private;
1247 	bool quota_feature = f2fs_sb_has_quota_ino(sbi);
1248 	bool quota_turnon = sb_any_quota_loaded(sb);
1249 	char *old_qname, *new_qname;
1250 	bool usr_qf_name, grp_qf_name, prj_qf_name, usrquota, grpquota, prjquota;
1251 	int i;
1252 
1253 	/*
1254 	 * We do the test below only for project quotas. 'usrquota' and
1255 	 * 'grpquota' mount options are allowed even without quota feature
1256 	 * to support legacy quotas in quota files.
1257 	 */
1258 	if (ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA) &&
1259 			!f2fs_sb_has_project_quota(sbi)) {
1260 		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
1261 		return -EINVAL;
1262 	}
1263 
1264 	if (ctx->qname_mask) {
1265 		for (i = 0; i < MAXQUOTAS; i++) {
1266 			if (!(ctx->qname_mask & (1 << i)))
1267 				continue;
1268 
1269 			old_qname = F2FS_OPTION(sbi).s_qf_names[i];
1270 			new_qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
1271 			if (quota_turnon &&
1272 				!!old_qname != !!new_qname)
1273 				goto err_jquota_change;
1274 
1275 			if (old_qname) {
1276 				if (!new_qname) {
1277 					f2fs_info(sbi, "remove qf_name %s",
1278 								old_qname);
1279 					continue;
1280 				} else if (strcmp(old_qname, new_qname) == 0) {
1281 					ctx->qname_mask &= ~(1 << i);
1282 					continue;
1283 				}
1284 				goto err_jquota_specified;
1285 			}
1286 
1287 			if (quota_feature) {
1288 				f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
1289 				ctx->qname_mask &= ~(1 << i);
1290 				kfree(F2FS_CTX_INFO(ctx).s_qf_names[i]);
1291 				F2FS_CTX_INFO(ctx).s_qf_names[i] = NULL;
1292 			}
1293 		}
1294 	}
1295 
1296 	/* Make sure we don't mix old and new quota format */
1297 	usr_qf_name = F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
1298 			F2FS_CTX_INFO(ctx).s_qf_names[USRQUOTA];
1299 	grp_qf_name = F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
1300 			F2FS_CTX_INFO(ctx).s_qf_names[GRPQUOTA];
1301 	prj_qf_name = F2FS_OPTION(sbi).s_qf_names[PRJQUOTA] ||
1302 			F2FS_CTX_INFO(ctx).s_qf_names[PRJQUOTA];
1303 	usrquota = test_opt(sbi, USRQUOTA) ||
1304 			ctx_test_opt(ctx, F2FS_MOUNT_USRQUOTA);
1305 	grpquota = test_opt(sbi, GRPQUOTA) ||
1306 			ctx_test_opt(ctx, F2FS_MOUNT_GRPQUOTA);
1307 	prjquota = test_opt(sbi, PRJQUOTA) ||
1308 			ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA);
1309 
1310 	if (usr_qf_name) {
1311 		ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
1312 		usrquota = false;
1313 	}
1314 	if (grp_qf_name) {
1315 		ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
1316 		grpquota = false;
1317 	}
1318 	if (prj_qf_name) {
1319 		ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
1320 		prjquota = false;
1321 	}
1322 	if (usr_qf_name || grp_qf_name || prj_qf_name) {
1323 		if (grpquota || usrquota || prjquota) {
1324 			f2fs_err(sbi, "old and new quota format mixing");
1325 			return -EINVAL;
1326 		}
1327 		if (!(ctx->spec_mask & F2FS_SPEC_jqfmt ||
1328 				F2FS_OPTION(sbi).s_jquota_fmt)) {
1329 			f2fs_err(sbi, "journaled quota format not specified");
1330 			return -EINVAL;
1331 		}
1332 	}
1333 	return 0;
1334 
1335 err_jquota_change:
1336 	f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
1337 	return -EINVAL;
1338 err_jquota_specified:
1339 	f2fs_err(sbi, "%s quota file already specified",
1340 		 QTYPE2NAME(i));
1341 	return -EINVAL;
1342 
1343 #else
1344 	if (f2fs_readonly(sbi->sb))
1345 		return 0;
1346 	if (f2fs_sb_has_quota_ino(sbi)) {
1347 		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1348 		return -EINVAL;
1349 	}
1350 	if (f2fs_sb_has_project_quota(sbi)) {
1351 		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1352 		return -EINVAL;
1353 	}
1354 
1355 	return 0;
1356 #endif
1357 }
1358 
1359 static int f2fs_check_test_dummy_encryption(struct fs_context *fc,
1360 					    struct super_block *sb)
1361 {
1362 	struct f2fs_fs_context *ctx = fc->fs_private;
1363 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1364 
1365 	if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy))
1366 		return 0;
1367 
1368 	if (!f2fs_sb_has_encrypt(sbi)) {
1369 		f2fs_err(sbi, "Encrypt feature is off");
1370 		return -EINVAL;
1371 	}
1372 
1373 	/*
1374 	 * This mount option is just for testing, and it's not worthwhile to
1375 	 * implement the extra complexity (e.g. RCU protection) that would be
1376 	 * needed to allow it to be set or changed during remount.  We do allow
1377 	 * it to be specified during remount, but only if there is no change.
1378 	 */
1379 	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
1380 		if (fscrypt_dummy_policies_equal(&F2FS_OPTION(sbi).dummy_enc_policy,
1381 				&F2FS_CTX_INFO(ctx).dummy_enc_policy))
1382 			return 0;
1383 		f2fs_warn(sbi, "Can't set or change test_dummy_encryption on remount");
1384 		return -EINVAL;
1385 	}
1386 	return 0;
1387 }
1388 
1389 static inline bool test_compression_spec(unsigned int mask)
1390 {
1391 	return mask & (F2FS_SPEC_compress_algorithm
1392 			| F2FS_SPEC_compress_log_size
1393 			| F2FS_SPEC_compress_extension
1394 			| F2FS_SPEC_nocompress_extension
1395 			| F2FS_SPEC_compress_chksum
1396 			| F2FS_SPEC_compress_mode);
1397 }
1398 
1399 static inline void clear_compression_spec(struct f2fs_fs_context *ctx)
1400 {
1401 	ctx->spec_mask &= ~(F2FS_SPEC_compress_algorithm
1402 						| F2FS_SPEC_compress_log_size
1403 						| F2FS_SPEC_compress_extension
1404 						| F2FS_SPEC_nocompress_extension
1405 						| F2FS_SPEC_compress_chksum
1406 						| F2FS_SPEC_compress_mode);
1407 }
1408 
1409 static int f2fs_check_compression(struct fs_context *fc,
1410 				  struct super_block *sb)
1411 {
1412 #ifdef CONFIG_F2FS_FS_COMPRESSION
1413 	struct f2fs_fs_context *ctx = fc->fs_private;
1414 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1415 	int i, cnt;
1416 
1417 	if (!f2fs_sb_has_compression(sbi)) {
1418 		if (test_compression_spec(ctx->spec_mask) ||
1419 			ctx_test_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE))
1420 			f2fs_info(sbi, "Image doesn't support compression");
1421 		clear_compression_spec(ctx);
1422 		ctx->opt_mask &= ~BIT(F2FS_MOUNT_COMPRESS_CACHE);
1423 		return 0;
1424 	}
1425 	if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
1426 		cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
1427 		for (i = 0; i < F2FS_CTX_INFO(ctx).compress_ext_cnt; i++) {
1428 			if (is_compress_extension_exist(&F2FS_OPTION(sbi),
1429 					F2FS_CTX_INFO(ctx).extensions[i], true)) {
1430 				F2FS_CTX_INFO(ctx).extensions[i][0] = '\0';
1431 				cnt--;
1432 			}
1433 		}
1434 		if (F2FS_OPTION(sbi).compress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
1435 			f2fs_err(sbi, "invalid extension length/number");
1436 			return -EINVAL;
1437 		}
1438 	}
1439 	if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
1440 		cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
1441 		for (i = 0; i < F2FS_CTX_INFO(ctx).nocompress_ext_cnt; i++) {
1442 			if (is_compress_extension_exist(&F2FS_OPTION(sbi),
1443 					F2FS_CTX_INFO(ctx).noextensions[i], false)) {
1444 				F2FS_CTX_INFO(ctx).noextensions[i][0] = '\0';
1445 				cnt--;
1446 			}
1447 		}
1448 		if (F2FS_OPTION(sbi).nocompress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
1449 			f2fs_err(sbi, "invalid noextension length/number");
1450 			return -EINVAL;
1451 		}
1452 	}
1453 
1454 	if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
1455 				F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
1456 				F2FS_CTX_INFO(ctx).extensions,
1457 				F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
1458 		f2fs_err(sbi, "new noextensions conflicts with new extensions");
1459 		return -EINVAL;
1460 	}
1461 	if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
1462 				F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
1463 				F2FS_OPTION(sbi).extensions,
1464 				F2FS_OPTION(sbi).compress_ext_cnt)) {
1465 		f2fs_err(sbi, "new noextensions conflicts with old extensions");
1466 		return -EINVAL;
1467 	}
1468 	if (f2fs_test_compress_extension(F2FS_OPTION(sbi).noextensions,
1469 				F2FS_OPTION(sbi).nocompress_ext_cnt,
1470 				F2FS_CTX_INFO(ctx).extensions,
1471 				F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
1472 		f2fs_err(sbi, "new extensions conflicts with old noextensions");
1473 		return -EINVAL;
1474 	}
1475 #endif
1476 	return 0;
1477 }
1478 
1479 static int f2fs_check_opt_consistency(struct fs_context *fc,
1480 				      struct super_block *sb)
1481 {
1482 	struct f2fs_fs_context *ctx = fc->fs_private;
1483 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1484 	int err;
1485 
1486 	if (ctx_test_opt(ctx, F2FS_MOUNT_NORECOVERY) && !f2fs_readonly(sb))
1487 		return -EINVAL;
1488 
1489 	if (f2fs_hw_should_discard(sbi) &&
1490 			(ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
1491 			!ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
1492 		f2fs_warn(sbi, "discard is required for zoned block devices");
1493 		return -EINVAL;
1494 	}
1495 
1496 	if (!f2fs_hw_support_discard(sbi) &&
1497 			(ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
1498 			ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
1499 		f2fs_warn(sbi, "device does not support discard");
1500 		ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
1501 		ctx->opt_mask &= ~BIT(F2FS_MOUNT_DISCARD);
1502 	}
1503 
1504 	if (f2fs_sb_has_device_alias(sbi) &&
1505 			(ctx->opt_mask & BIT(F2FS_MOUNT_READ_EXTENT_CACHE)) &&
1506 			!ctx_test_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE)) {
1507 		f2fs_err(sbi, "device aliasing requires extent cache");
1508 		return -EINVAL;
1509 	}
1510 
1511 	if (test_opt(sbi, RESERVE_ROOT) &&
1512 			(ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_ROOT)) &&
1513 			ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_ROOT)) {
1514 		f2fs_info(sbi, "Preserve previous reserve_root=%u",
1515 			F2FS_OPTION(sbi).root_reserved_blocks);
1516 		ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
1517 		ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_ROOT);
1518 	}
1519 	if (test_opt(sbi, RESERVE_NODE) &&
1520 			(ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_NODE)) &&
1521 			ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_NODE)) {
1522 		f2fs_info(sbi, "Preserve previous reserve_node=%u",
1523 			F2FS_OPTION(sbi).root_reserved_nodes);
1524 		ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
1525 		ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_NODE);
1526 	}
1527 
1528 	err = f2fs_check_test_dummy_encryption(fc, sb);
1529 	if (err)
1530 		return err;
1531 
1532 	err = f2fs_check_compression(fc, sb);
1533 	if (err)
1534 		return err;
1535 
1536 	err = f2fs_check_quota_consistency(fc, sb);
1537 	if (err)
1538 		return err;
1539 
1540 	if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) {
1541 		f2fs_err(sbi,
1542 			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
1543 		return -EINVAL;
1544 	}
1545 
1546 	/*
1547 	 * The BLKZONED feature indicates that the drive was formatted with
1548 	 * zone alignment optimization. This is optional for host-aware
1549 	 * devices, but mandatory for host-managed zoned block devices.
1550 	 */
1551 	if (f2fs_sb_has_blkzoned(sbi)) {
1552 		if (F2FS_CTX_INFO(ctx).bggc_mode == BGGC_MODE_OFF) {
1553 			f2fs_warn(sbi, "zoned devices need bggc");
1554 			return -EINVAL;
1555 		}
1556 #ifdef CONFIG_BLK_DEV_ZONED
1557 		if ((ctx->spec_mask & F2FS_SPEC_discard_unit) &&
1558 		F2FS_CTX_INFO(ctx).discard_unit != DISCARD_UNIT_SECTION) {
1559 			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
1560 			F2FS_CTX_INFO(ctx).discard_unit = DISCARD_UNIT_SECTION;
1561 		}
1562 
1563 		if ((ctx->spec_mask & F2FS_SPEC_mode) &&
1564 		F2FS_CTX_INFO(ctx).fs_mode != FS_MODE_LFS) {
1565 			f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
1566 			return -EINVAL;
1567 		}
1568 #else
1569 		f2fs_err(sbi, "Zoned block device support is not enabled");
1570 		return -EINVAL;
1571 #endif
1572 	}
1573 
1574 	if (ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE)) {
1575 		if (!f2fs_sb_has_extra_attr(sbi) ||
1576 			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
1577 			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
1578 			return -EINVAL;
1579 		}
1580 		if (!ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR) && !test_opt(sbi, INLINE_XATTR)) {
1581 			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
1582 			return -EINVAL;
1583 		}
1584 	}
1585 
1586 	if (ctx_test_opt(ctx, F2FS_MOUNT_ATGC) &&
1587 	    F2FS_CTX_INFO(ctx).fs_mode == FS_MODE_LFS) {
1588 		f2fs_err(sbi, "LFS is not compatible with ATGC");
1589 		return -EINVAL;
1590 	}
1591 
1592 	if (f2fs_is_readonly(sbi) && ctx_test_opt(ctx, F2FS_MOUNT_FLUSH_MERGE)) {
1593 		f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
1594 		return -EINVAL;
1595 	}
1596 
1597 	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
1598 		f2fs_err(sbi, "Allow to mount readonly mode only");
1599 		return -EROFS;
1600 	}
1601 	return 0;
1602 }
1603 
1604 static void f2fs_apply_quota_options(struct fs_context *fc,
1605 				     struct super_block *sb)
1606 {
1607 #ifdef CONFIG_QUOTA
1608 	struct f2fs_fs_context *ctx = fc->fs_private;
1609 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1610 	bool quota_feature = f2fs_sb_has_quota_ino(sbi);
1611 	char *qname;
1612 	int i;
1613 
1614 	if (quota_feature)
1615 		return;
1616 
1617 	for (i = 0; i < MAXQUOTAS; i++) {
1618 		if (!(ctx->qname_mask & (1 << i)))
1619 			continue;
1620 
1621 		qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
1622 		if (qname) {
1623 			qname = kstrdup(F2FS_CTX_INFO(ctx).s_qf_names[i],
1624 					GFP_KERNEL | __GFP_NOFAIL);
1625 			set_opt(sbi, QUOTA);
1626 		}
1627 		F2FS_OPTION(sbi).s_qf_names[i] = qname;
1628 	}
1629 
1630 	if (ctx->spec_mask & F2FS_SPEC_jqfmt)
1631 		F2FS_OPTION(sbi).s_jquota_fmt = F2FS_CTX_INFO(ctx).s_jquota_fmt;
1632 
1633 	if (quota_feature && F2FS_OPTION(sbi).s_jquota_fmt) {
1634 		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
1635 		F2FS_OPTION(sbi).s_jquota_fmt = 0;
1636 	}
1637 #endif
1638 }
1639 
1640 static void f2fs_apply_test_dummy_encryption(struct fs_context *fc,
1641 					     struct super_block *sb)
1642 {
1643 	struct f2fs_fs_context *ctx = fc->fs_private;
1644 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1645 
1646 	if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy) ||
1647 		/* if already set, it was already verified to be the same */
1648 		fscrypt_is_dummy_policy_set(&F2FS_OPTION(sbi).dummy_enc_policy))
1649 		return;
1650 	swap(F2FS_OPTION(sbi).dummy_enc_policy, F2FS_CTX_INFO(ctx).dummy_enc_policy);
1651 	f2fs_warn(sbi, "Test dummy encryption mode enabled");
1652 }
1653 
1654 static void f2fs_apply_compression(struct fs_context *fc,
1655 				   struct super_block *sb)
1656 {
1657 #ifdef CONFIG_F2FS_FS_COMPRESSION
1658 	struct f2fs_fs_context *ctx = fc->fs_private;
1659 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1660 	unsigned char (*ctx_ext)[F2FS_EXTENSION_LEN];
1661 	unsigned char (*sbi_ext)[F2FS_EXTENSION_LEN];
1662 	int ctx_cnt, sbi_cnt, i;
1663 
1664 	if (ctx->spec_mask & F2FS_SPEC_compress_level)
1665 		F2FS_OPTION(sbi).compress_level =
1666 					F2FS_CTX_INFO(ctx).compress_level;
1667 	if (ctx->spec_mask & F2FS_SPEC_compress_algorithm)
1668 		F2FS_OPTION(sbi).compress_algorithm =
1669 					F2FS_CTX_INFO(ctx).compress_algorithm;
1670 	if (ctx->spec_mask & F2FS_SPEC_compress_log_size)
1671 		F2FS_OPTION(sbi).compress_log_size =
1672 					F2FS_CTX_INFO(ctx).compress_log_size;
1673 	if (ctx->spec_mask & F2FS_SPEC_compress_chksum)
1674 		F2FS_OPTION(sbi).compress_chksum =
1675 					F2FS_CTX_INFO(ctx).compress_chksum;
1676 	if (ctx->spec_mask & F2FS_SPEC_compress_mode)
1677 		F2FS_OPTION(sbi).compress_mode =
1678 					F2FS_CTX_INFO(ctx).compress_mode;
1679 	if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
1680 		ctx_ext = F2FS_CTX_INFO(ctx).extensions;
1681 		ctx_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
1682 		sbi_ext = F2FS_OPTION(sbi).extensions;
1683 		sbi_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
1684 		for (i = 0; i < ctx_cnt; i++) {
1685 			if (strlen(ctx_ext[i]) == 0)
1686 				continue;
1687 			strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
1688 			sbi_cnt++;
1689 		}
1690 		F2FS_OPTION(sbi).compress_ext_cnt = sbi_cnt;
1691 	}
1692 	if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
1693 		ctx_ext = F2FS_CTX_INFO(ctx).noextensions;
1694 		ctx_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
1695 		sbi_ext = F2FS_OPTION(sbi).noextensions;
1696 		sbi_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
1697 		for (i = 0; i < ctx_cnt; i++) {
1698 			if (strlen(ctx_ext[i]) == 0)
1699 				continue;
1700 			strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
1701 			sbi_cnt++;
1702 		}
1703 		F2FS_OPTION(sbi).nocompress_ext_cnt = sbi_cnt;
1704 	}
1705 #endif
1706 }
1707 
1708 static void f2fs_apply_options(struct fs_context *fc, struct super_block *sb)
1709 {
1710 	struct f2fs_fs_context *ctx = fc->fs_private;
1711 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1712 
1713 	F2FS_OPTION(sbi).opt &= ~ctx->opt_mask;
1714 	F2FS_OPTION(sbi).opt |= F2FS_CTX_INFO(ctx).opt;
1715 
1716 	if (ctx->spec_mask & F2FS_SPEC_background_gc)
1717 		F2FS_OPTION(sbi).bggc_mode = F2FS_CTX_INFO(ctx).bggc_mode;
1718 	if (ctx->spec_mask & F2FS_SPEC_inline_xattr_size)
1719 		F2FS_OPTION(sbi).inline_xattr_size =
1720 					F2FS_CTX_INFO(ctx).inline_xattr_size;
1721 	if (ctx->spec_mask & F2FS_SPEC_active_logs)
1722 		F2FS_OPTION(sbi).active_logs = F2FS_CTX_INFO(ctx).active_logs;
1723 	if (ctx->spec_mask & F2FS_SPEC_reserve_root)
1724 		F2FS_OPTION(sbi).root_reserved_blocks =
1725 					F2FS_CTX_INFO(ctx).root_reserved_blocks;
1726 	if (ctx->spec_mask & F2FS_SPEC_reserve_node)
1727 		F2FS_OPTION(sbi).root_reserved_nodes =
1728 					F2FS_CTX_INFO(ctx).root_reserved_nodes;
1729 	if (ctx->spec_mask & F2FS_SPEC_resgid)
1730 		F2FS_OPTION(sbi).s_resgid = F2FS_CTX_INFO(ctx).s_resgid;
1731 	if (ctx->spec_mask & F2FS_SPEC_resuid)
1732 		F2FS_OPTION(sbi).s_resuid = F2FS_CTX_INFO(ctx).s_resuid;
1733 	if (ctx->spec_mask & F2FS_SPEC_mode)
1734 		F2FS_OPTION(sbi).fs_mode = F2FS_CTX_INFO(ctx).fs_mode;
1735 #ifdef CONFIG_F2FS_FAULT_INJECTION
1736 	if (ctx->spec_mask & F2FS_SPEC_fault_injection)
1737 		(void)f2fs_build_fault_attr(sbi,
1738 		F2FS_CTX_INFO(ctx).fault_info.inject_rate, 0, FAULT_RATE);
1739 	if (ctx->spec_mask & F2FS_SPEC_fault_type)
1740 		(void)f2fs_build_fault_attr(sbi, 0,
1741 			F2FS_CTX_INFO(ctx).fault_info.inject_type, FAULT_TYPE);
1742 #endif
1743 	if (ctx->spec_mask & F2FS_SPEC_alloc_mode)
1744 		F2FS_OPTION(sbi).alloc_mode = F2FS_CTX_INFO(ctx).alloc_mode;
1745 	if (ctx->spec_mask & F2FS_SPEC_fsync_mode)
1746 		F2FS_OPTION(sbi).fsync_mode = F2FS_CTX_INFO(ctx).fsync_mode;
1747 	if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap)
1748 		F2FS_OPTION(sbi).unusable_cap = F2FS_CTX_INFO(ctx).unusable_cap;
1749 	if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap_perc)
1750 		F2FS_OPTION(sbi).unusable_cap_perc =
1751 					F2FS_CTX_INFO(ctx).unusable_cap_perc;
1752 	if (ctx->spec_mask & F2FS_SPEC_discard_unit)
1753 		F2FS_OPTION(sbi).discard_unit = F2FS_CTX_INFO(ctx).discard_unit;
1754 	if (ctx->spec_mask & F2FS_SPEC_memory_mode)
1755 		F2FS_OPTION(sbi).memory_mode = F2FS_CTX_INFO(ctx).memory_mode;
1756 	if (ctx->spec_mask & F2FS_SPEC_errors)
1757 		F2FS_OPTION(sbi).errors = F2FS_CTX_INFO(ctx).errors;
1758 	if (ctx->spec_mask & F2FS_SPEC_lookup_mode)
1759 		F2FS_OPTION(sbi).lookup_mode = F2FS_CTX_INFO(ctx).lookup_mode;
1760 
1761 	f2fs_apply_compression(fc, sb);
1762 	f2fs_apply_test_dummy_encryption(fc, sb);
1763 	f2fs_apply_quota_options(fc, sb);
1764 }
1765 
1766 static int f2fs_sanity_check_options(struct f2fs_sb_info *sbi, bool remount)
1767 {
1768 	if (f2fs_sb_has_device_alias(sbi) &&
1769 	    !test_opt(sbi, READ_EXTENT_CACHE)) {
1770 		f2fs_err(sbi, "device aliasing requires extent cache");
1771 		return -EINVAL;
1772 	}
1773 
1774 	if (!remount)
1775 		return 0;
1776 
1777 #ifdef CONFIG_BLK_DEV_ZONED
1778 	if (f2fs_sb_has_blkzoned(sbi) &&
1779 	    sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
1780 		f2fs_err(sbi,
1781 			"zoned: max open zones %u is too small, need at least %u open zones",
1782 				 sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
1783 		return -EINVAL;
1784 	}
1785 #endif
1786 	if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
1787 		f2fs_warn(sbi, "LFS is not compatible with IPU");
1788 		return -EINVAL;
1789 	}
1790 	return 0;
1791 }
1792 
1793 static struct inode *f2fs_alloc_inode(struct super_block *sb)
1794 {
1795 	struct f2fs_inode_info *fi;
1796 
1797 	if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))
1798 		return NULL;
1799 
1800 	fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
1801 	if (!fi)
1802 		return NULL;
1803 
1804 	init_once((void *) fi);
1805 
1806 	/* Initialize f2fs-specific inode info */
1807 	atomic_set(&fi->dirty_pages, 0);
1808 	atomic_set(&fi->i_compr_blocks, 0);
1809 	atomic_set(&fi->open_count, 0);
1810 	atomic_set(&fi->writeback, 0);
1811 	init_f2fs_rwsem(&fi->i_sem);
1812 	spin_lock_init(&fi->i_size_lock);
1813 	INIT_LIST_HEAD(&fi->dirty_list);
1814 	INIT_LIST_HEAD(&fi->gdirty_list);
1815 	INIT_LIST_HEAD(&fi->gdonate_list);
1816 	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
1817 	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
1818 	init_f2fs_rwsem(&fi->i_xattr_sem);
1819 
1820 	/* Will be used by directory only */
1821 	fi->i_dir_level = F2FS_SB(sb)->dir_level;
1822 
1823 	return &fi->vfs_inode;
1824 }
1825 
1826 static int f2fs_drop_inode(struct inode *inode)
1827 {
1828 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1829 	int ret;
1830 
1831 	/*
1832 	 * during filesystem shutdown, if checkpoint is disabled,
1833 	 * drop useless meta/node dirty pages.
1834 	 */
1835 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1836 		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1837 			inode->i_ino == F2FS_META_INO(sbi)) {
1838 			trace_f2fs_drop_inode(inode, 1);
1839 			return 1;
1840 		}
1841 	}
1842 
1843 	/*
1844 	 * This is to avoid a deadlock condition like below.
1845 	 * writeback_single_inode(inode)
1846 	 *  - f2fs_write_data_page
1847 	 *    - f2fs_gc -> iput -> evict
1848 	 *       - inode_wait_for_writeback(inode)
1849 	 */
1850 	if ((!inode_unhashed(inode) && inode_state_read(inode) & I_SYNC)) {
1851 		if (!inode->i_nlink && !is_bad_inode(inode)) {
1852 			/* to avoid calling evict_inode simultaneously */
1853 			__iget(inode);
1854 			spin_unlock(&inode->i_lock);
1855 
1856 			/* fi->extent_tree should remain for writepage */
1857 			f2fs_destroy_extent_node(inode);
1858 
1859 			sb_start_intwrite(inode->i_sb);
1860 			f2fs_i_size_write(inode, 0);
1861 
1862 			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
1863 					inode, NULL, 0, DATA);
1864 			truncate_inode_pages_final(inode->i_mapping);
1865 
1866 			if (F2FS_HAS_BLOCKS(inode))
1867 				f2fs_truncate(inode);
1868 
1869 			sb_end_intwrite(inode->i_sb);
1870 
1871 			spin_lock(&inode->i_lock);
1872 			atomic_dec(&inode->i_count);
1873 		}
1874 		trace_f2fs_drop_inode(inode, 0);
1875 		return 0;
1876 	}
1877 	ret = inode_generic_drop(inode);
1878 	if (!ret)
1879 		ret = fscrypt_drop_inode(inode);
1880 	trace_f2fs_drop_inode(inode, ret);
1881 	return ret;
1882 }
1883 
1884 int f2fs_inode_dirtied(struct inode *inode, bool sync)
1885 {
1886 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1887 	int ret = 0;
1888 
1889 	spin_lock(&sbi->inode_lock[DIRTY_META]);
1890 	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1891 		ret = 1;
1892 	} else {
1893 		set_inode_flag(inode, FI_DIRTY_INODE);
1894 		stat_inc_dirty_inode(sbi, DIRTY_META);
1895 	}
1896 	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
1897 		list_add_tail(&F2FS_I(inode)->gdirty_list,
1898 				&sbi->inode_list[DIRTY_META]);
1899 		inc_page_count(sbi, F2FS_DIRTY_IMETA);
1900 	}
1901 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1902 
1903 	/* if atomic write is not committed, set inode w/ atomic dirty */
1904 	if (!ret && f2fs_is_atomic_file(inode) &&
1905 			!is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
1906 		set_inode_flag(inode, FI_ATOMIC_DIRTIED);
1907 
1908 	return ret;
1909 }
1910 
1911 void f2fs_inode_synced(struct inode *inode)
1912 {
1913 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1914 
1915 	spin_lock(&sbi->inode_lock[DIRTY_META]);
1916 	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1917 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
1918 		return;
1919 	}
1920 	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
1921 		list_del_init(&F2FS_I(inode)->gdirty_list);
1922 		dec_page_count(sbi, F2FS_DIRTY_IMETA);
1923 	}
1924 	clear_inode_flag(inode, FI_DIRTY_INODE);
1925 	clear_inode_flag(inode, FI_AUTO_RECOVER);
1926 	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
1927 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1928 }
1929 
1930 /*
1931  * f2fs_dirty_inode() is called from __mark_inode_dirty()
1932  *
1933  * We should call set_dirty_inode to write the dirty inode through write_inode.
1934  */
1935 static void f2fs_dirty_inode(struct inode *inode, int flags)
1936 {
1937 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1938 
1939 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1940 			inode->i_ino == F2FS_META_INO(sbi))
1941 		return;
1942 
1943 	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
1944 		clear_inode_flag(inode, FI_AUTO_RECOVER);
1945 
1946 	f2fs_inode_dirtied(inode, false);
1947 }
1948 
1949 static void f2fs_free_inode(struct inode *inode)
1950 {
1951 	fscrypt_free_inode(inode);
1952 	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
1953 }
1954 
1955 static void destroy_percpu_info(struct f2fs_sb_info *sbi)
1956 {
1957 	percpu_counter_destroy(&sbi->total_valid_inode_count);
1958 	percpu_counter_destroy(&sbi->rf_node_block_count);
1959 	percpu_counter_destroy(&sbi->alloc_valid_block_count);
1960 }
1961 
1962 static void destroy_device_list(struct f2fs_sb_info *sbi)
1963 {
1964 	int i;
1965 
1966 	for (i = 0; i < sbi->s_ndevs; i++) {
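		/* Device 0 shares the superblock's bdev, which is released by the VFS. */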
1967 		if (i > 0)
1968 			bdev_fput(FDEV(i).bdev_file);
1969 #ifdef CONFIG_BLK_DEV_ZONED
1970 		kvfree(FDEV(i).blkz_seq);
1971 #endif
1972 	}
1973 	kvfree(sbi->devs);
1974 }
1975 
1976 static void f2fs_put_super(struct super_block *sb)
1977 {
1978 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1979 	int i;
1980 	int err = 0;
1981 	bool done;
1982 
1983 	/* unregister procfs/sysfs entries in advance to avoid a race */
1984 	f2fs_unregister_sysfs(sbi);
1985 
1986 	f2fs_quota_off_umount(sb);
1987 
1988 	/* prevent remaining shrinker jobs */
1989 	mutex_lock(&sbi->umount_mutex);
1990 
1991 	/*
1992 	 * Flush all issued checkpoints and stop the checkpoint issue thread.
1993 	 * After that, all checkpoints should be done by each process context.
1994 	 */
1995 	f2fs_stop_ckpt_thread(sbi);
1996 
1997 	/*
1998 	 * We don't need to do a checkpoint when the superblock is clean.
1999 	 * But if the previous checkpoint was not done by umount, we need to do
2000 	 * a clean checkpoint again.
2001 	 */
2002 	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
2003 			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
2004 		struct cp_control cpc = {
2005 			.reason = CP_UMOUNT,
2006 		};
2007 		stat_inc_cp_call_count(sbi, TOTAL_CALL);
2008 		err = f2fs_write_checkpoint(sbi, &cpc);
2009 	}
2010 
2011 	/* be sure to wait for any on-going discard commands */
2012 	done = f2fs_issue_discard_timeout(sbi);
2013 	if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
2014 		struct cp_control cpc = {
2015 			.reason = CP_UMOUNT | CP_TRIMMED,
2016 		};
2017 		stat_inc_cp_call_count(sbi, TOTAL_CALL);
2018 		err = f2fs_write_checkpoint(sbi, &cpc);
2019 	}
2020 
2021 	/*
2022 	 * Normally the superblock is clean, so we need to release this.
2023 	 * In addition, EIO will skip the checkpoint, so we need this as well.
2024 	 */
2025 	f2fs_release_ino_entry(sbi, true);
2026 
2027 	f2fs_leave_shrinker(sbi);
2028 	mutex_unlock(&sbi->umount_mutex);
2029 
2030 	/* In our cp_error case, we can wait for any writeback page */
2031 	f2fs_flush_merged_writes(sbi);
2032 
2033 	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
2034 
2035 	if (err || f2fs_cp_error(sbi)) {
2036 		truncate_inode_pages_final(NODE_MAPPING(sbi));
2037 		truncate_inode_pages_final(META_MAPPING(sbi));
2038 	}
2039 
2040 	f2fs_bug_on(sbi, sbi->fsync_node_num);
2041 
2042 	f2fs_destroy_compress_inode(sbi);
2043 
2044 	iput(sbi->node_inode);
2045 	sbi->node_inode = NULL;
2046 
2047 	iput(sbi->meta_inode);
2048 	sbi->meta_inode = NULL;
2049 
2050 	/* Should check the page counts after dropping all node/meta pages */
2051 	for (i = 0; i < NR_COUNT_TYPE; i++) {
2052 		if (!get_pages(sbi, i))
2053 			continue;
2054 		f2fs_err(sbi, "detect filesystem reference count leak during "
2055 			"umount, type: %d, count: %lld", i, get_pages(sbi, i));
2056 		f2fs_bug_on(sbi, 1);
2057 	}
2058 
2059 	/*
2060 	 * iput() can update stat information if f2fs_write_checkpoint()
2061 	 * above failed with an error.
2062 	 */
2063 	f2fs_destroy_stats(sbi);
2064 
2065 	/* destroy f2fs internal modules */
2066 	f2fs_destroy_node_manager(sbi);
2067 	f2fs_destroy_segment_manager(sbi);
2068 
2069 	/* flush s_error_work before sbi destroy */
2070 	flush_work(&sbi->s_error_work);
2071 
2072 	f2fs_destroy_post_read_wq(sbi);
2073 
2074 	kvfree(sbi->ckpt);
2075 
2076 	kfree(sbi->raw_super);
2077 
2078 	f2fs_destroy_page_array_cache(sbi);
2079 #ifdef CONFIG_QUOTA
2080 	for (i = 0; i < MAXQUOTAS; i++)
2081 		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
2082 #endif
2083 	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
2084 	destroy_percpu_info(sbi);
2085 	f2fs_destroy_iostat(sbi);
2086 	for (i = 0; i < NR_PAGE_TYPE; i++)
2087 		kfree(sbi->write_io[i]);
2088 #if IS_ENABLED(CONFIG_UNICODE)
2089 	utf8_unload(sb->s_encoding);
2090 #endif
2091 }
2092 
2093 int f2fs_sync_fs(struct super_block *sb, int sync)
2094 {
2095 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2096 	int err = 0;
2097 
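	/* Nothing to sync if checkpointing already failed or is disabled. */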
2098 	if (unlikely(f2fs_cp_error(sbi)))
2099 		return 0;
2100 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2101 		return 0;
2102 
2103 	trace_f2fs_sync_fs(sb, sync);
2104 
2105 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2106 		return -EAGAIN;
2107 
2108 	if (sync) {
2109 		stat_inc_cp_call_count(sbi, TOTAL_CALL);
2110 		err = f2fs_issue_checkpoint(sbi);
2111 	}
2112 
2113 	return err;
2114 }
2115 
2116 static int f2fs_freeze(struct super_block *sb)
2117 {
2118 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2119 
2120 	if (f2fs_readonly(sb))
2121 		return 0;
2122 
2123 	/* IO error happened before */
2124 	if (unlikely(f2fs_cp_error(sbi)))
2125 		return -EIO;
2126 
2127 	/* must be clean, since sync_filesystem() was already called */
2128 	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY))
2129 		return -EINVAL;
2130 
2131 	sbi->umount_lock_holder = current;
2132 
2133 	/* Let's flush checkpoints and stop the thread. */
2134 	f2fs_flush_ckpt_thread(sbi);
2135 
2136 	sbi->umount_lock_holder = NULL;
2137 
2138 	/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
2139 	set_sbi_flag(sbi, SBI_IS_FREEZING);
2140 	return 0;
2141 }
2142 
2143 static int f2fs_unfreeze(struct super_block *sb)
2144 {
2145 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2146 
2147 	/*
2148 	 * Creating a snapshot on a mounted lvm device updates its
2149 	 * discard_max_bytes to zero, so let's drop all remaining
2150 	 * discards.
2151 	 * We don't need to disable real-time discard because discard_max_bytes
2152 	 * will recover once the snapshot is removed.
2153 	 */
2154 	if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
2155 		f2fs_issue_discard_timeout(sbi);
2156 
2157 	clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
2158 	return 0;
2159 }
2160 
2161 #ifdef CONFIG_QUOTA
2162 static int f2fs_statfs_project(struct super_block *sb,
2163 				kprojid_t projid, struct kstatfs *buf)
2164 {
2165 	struct kqid qid;
2166 	struct dquot *dquot;
2167 	u64 limit;
2168 	u64 curblock;
2169 
2170 	qid = make_kqid_projid(projid);
2171 	dquot = dqget(sb, qid);
2172 	if (IS_ERR(dquot))
2173 		return PTR_ERR(dquot);
2174 	spin_lock(&dquot->dq_dqb_lock);
2175 
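	/* Use the smaller non-zero of the soft/hard block limits, in fs blocks. */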
2176 	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
2177 					dquot->dq_dqb.dqb_bhardlimit);
2178 	limit >>= sb->s_blocksize_bits;
2179 
2180 	if (limit) {
2181 		uint64_t remaining = 0;
2182 
2183 		curblock = (dquot->dq_dqb.dqb_curspace +
2184 			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
2185 		if (limit > curblock)
2186 			remaining = limit - curblock;
2187 
2188 		buf->f_blocks = min(buf->f_blocks, limit);
2189 		buf->f_bfree = min(buf->f_bfree, remaining);
2190 		buf->f_bavail = min(buf->f_bavail, remaining);
2191 	}
2192 
2193 	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
2194 					dquot->dq_dqb.dqb_ihardlimit);
2195 
2196 	if (limit) {
2197 		uint64_t remaining = 0;
2198 
2199 		if (limit > dquot->dq_dqb.dqb_curinodes)
2200 			remaining = limit - dquot->dq_dqb.dqb_curinodes;
2201 
2202 		buf->f_files = min(buf->f_files, limit);
2203 		buf->f_ffree = min(buf->f_ffree, remaining);
2204 	}
2205 
2206 	spin_unlock(&dquot->dq_dqb_lock);
2207 	dqput(dquot);
2208 	return 0;
2209 }
2210 #endif
2211 
2212 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
2213 {
2214 	struct super_block *sb = dentry->d_sb;
2215 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2216 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2217 	block_t total_count, user_block_count, start_count;
2218 	u64 avail_node_count;
2219 	unsigned int total_valid_node_count;
2220 
2221 	total_count = le64_to_cpu(sbi->raw_super->block_count);
2222 	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
2223 	buf->f_type = F2FS_SUPER_MAGIC;
2224 	buf->f_bsize = sbi->blocksize;
2225 
2226 	buf->f_blocks = total_count - start_count;
2227 
2228 	spin_lock(&sbi->stat_lock);
2229 	if (sbi->carve_out)
2230 		buf->f_blocks -= sbi->current_reserved_blocks;
2231 	user_block_count = sbi->user_block_count;
2232 	total_valid_node_count = valid_node_count(sbi);
2233 	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
2234 	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
2235 						sbi->current_reserved_blocks;
2236 
2237 	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
2238 		buf->f_bfree = 0;
2239 	else
2240 		buf->f_bfree -= sbi->unusable_block_count;
2241 	spin_unlock(&sbi->stat_lock);
2242 
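	/* Blocks available to unprivileged users exclude the root-reserved pool. */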
2243 	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
2244 		buf->f_bavail = buf->f_bfree -
2245 				F2FS_OPTION(sbi).root_reserved_blocks;
2246 	else
2247 		buf->f_bavail = 0;
2248 
2249 	if (avail_node_count > user_block_count) {
2250 		buf->f_files = user_block_count;
2251 		buf->f_ffree = buf->f_bavail;
2252 	} else {
2253 		buf->f_files = avail_node_count;
2254 		buf->f_ffree = min(avail_node_count - total_valid_node_count,
2255 					buf->f_bavail);
2256 	}
2257 
2258 	buf->f_namelen = F2FS_NAME_LEN;
2259 	buf->f_fsid    = u64_to_fsid(id);
2260 
2261 #ifdef CONFIG_QUOTA
2262 	if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) &&
2263 			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
2264 		f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf);
2265 	}
2266 #endif
2267 	return 0;
2268 }
2269 
2270 static inline void f2fs_show_quota_options(struct seq_file *seq,
2271 					   struct super_block *sb)
2272 {
2273 #ifdef CONFIG_QUOTA
2274 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2275 
2276 	if (F2FS_OPTION(sbi).s_jquota_fmt) {
2277 		char *fmtname = "";
2278 
2279 		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
2280 		case QFMT_VFS_OLD:
2281 			fmtname = "vfsold";
2282 			break;
2283 		case QFMT_VFS_V0:
2284 			fmtname = "vfsv0";
2285 			break;
2286 		case QFMT_VFS_V1:
2287 			fmtname = "vfsv1";
2288 			break;
2289 		}
2290 		seq_printf(seq, ",jqfmt=%s", fmtname);
2291 	}
2292 
2293 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
2294 		seq_show_option(seq, "usrjquota",
2295 			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
2296 
2297 	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
2298 		seq_show_option(seq, "grpjquota",
2299 			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
2300 
2301 	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
2302 		seq_show_option(seq, "prjjquota",
2303 			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
2304 #endif
2305 }
2306 
2307 #ifdef CONFIG_F2FS_FS_COMPRESSION
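/*
 * Illustrative example (not exhaustive): with lz4 at level 6, a cluster
 * log size of 2, and a "so" extension configured, the code below emits
 * ",compress_algorithm=lz4:6,compress_log_size=2,compress_extension=so".
 */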
2308 static inline void f2fs_show_compress_options(struct seq_file *seq,
2309 							struct super_block *sb)
2310 {
2311 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2312 	char *algtype = "";
2313 	int i;
2314 
2315 	if (!f2fs_sb_has_compression(sbi))
2316 		return;
2317 
2318 	switch (F2FS_OPTION(sbi).compress_algorithm) {
2319 	case COMPRESS_LZO:
2320 		algtype = "lzo";
2321 		break;
2322 	case COMPRESS_LZ4:
2323 		algtype = "lz4";
2324 		break;
2325 	case COMPRESS_ZSTD:
2326 		algtype = "zstd";
2327 		break;
2328 	case COMPRESS_LZORLE:
2329 		algtype = "lzo-rle";
2330 		break;
2331 	}
2332 	seq_printf(seq, ",compress_algorithm=%s", algtype);
2333 
2334 	if (F2FS_OPTION(sbi).compress_level)
2335 		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
2336 
2337 	seq_printf(seq, ",compress_log_size=%u",
2338 			F2FS_OPTION(sbi).compress_log_size);
2339 
2340 	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
2341 		seq_printf(seq, ",compress_extension=%s",
2342 			F2FS_OPTION(sbi).extensions[i]);
2343 	}
2344 
2345 	for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
2346 		seq_printf(seq, ",nocompress_extension=%s",
2347 			F2FS_OPTION(sbi).noextensions[i]);
2348 	}
2349 
2350 	if (F2FS_OPTION(sbi).compress_chksum)
2351 		seq_puts(seq, ",compress_chksum");
2352 
2353 	if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
2354 		seq_printf(seq, ",compress_mode=%s", "fs");
2355 	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
2356 		seq_printf(seq, ",compress_mode=%s", "user");
2357 
2358 	if (test_opt(sbi, COMPRESS_CACHE))
2359 		seq_puts(seq, ",compress_cache");
2360 }
2361 #endif
2362 
2363 static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
2364 {
2365 	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
2366 
2367 	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
2368 		seq_printf(seq, ",background_gc=%s", "sync");
2369 	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
2370 		seq_printf(seq, ",background_gc=%s", "on");
2371 	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
2372 		seq_printf(seq, ",background_gc=%s", "off");
2373 
2374 	if (test_opt(sbi, GC_MERGE))
2375 		seq_puts(seq, ",gc_merge");
2376 	else
2377 		seq_puts(seq, ",nogc_merge");
2378 
2379 	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
2380 		seq_puts(seq, ",disable_roll_forward");
2381 	if (test_opt(sbi, NORECOVERY))
2382 		seq_puts(seq, ",norecovery");
2383 	if (test_opt(sbi, DISCARD)) {
2384 		seq_puts(seq, ",discard");
2385 		if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
2386 			seq_printf(seq, ",discard_unit=%s", "block");
2387 		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2388 			seq_printf(seq, ",discard_unit=%s", "segment");
2389 		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2390 			seq_printf(seq, ",discard_unit=%s", "section");
2391 	} else {
2392 		seq_puts(seq, ",nodiscard");
2393 	}
2394 #ifdef CONFIG_F2FS_FS_XATTR
2395 	if (test_opt(sbi, XATTR_USER))
2396 		seq_puts(seq, ",user_xattr");
2397 	else
2398 		seq_puts(seq, ",nouser_xattr");
2399 	if (test_opt(sbi, INLINE_XATTR))
2400 		seq_puts(seq, ",inline_xattr");
2401 	else
2402 		seq_puts(seq, ",noinline_xattr");
2403 	if (test_opt(sbi, INLINE_XATTR_SIZE))
2404 		seq_printf(seq, ",inline_xattr_size=%u",
2405 					F2FS_OPTION(sbi).inline_xattr_size);
2406 #endif
2407 #ifdef CONFIG_F2FS_FS_POSIX_ACL
2408 	if (test_opt(sbi, POSIX_ACL))
2409 		seq_puts(seq, ",acl");
2410 	else
2411 		seq_puts(seq, ",noacl");
2412 #endif
2413 	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
2414 		seq_puts(seq, ",disable_ext_identify");
2415 	if (test_opt(sbi, INLINE_DATA))
2416 		seq_puts(seq, ",inline_data");
2417 	else
2418 		seq_puts(seq, ",noinline_data");
2419 	if (test_opt(sbi, INLINE_DENTRY))
2420 		seq_puts(seq, ",inline_dentry");
2421 	else
2422 		seq_puts(seq, ",noinline_dentry");
2423 	if (test_opt(sbi, FLUSH_MERGE))
2424 		seq_puts(seq, ",flush_merge");
2425 	else
2426 		seq_puts(seq, ",noflush_merge");
2427 	if (test_opt(sbi, NOBARRIER))
2428 		seq_puts(seq, ",nobarrier");
2429 	else
2430 		seq_puts(seq, ",barrier");
2431 	if (test_opt(sbi, FASTBOOT))
2432 		seq_puts(seq, ",fastboot");
2433 	if (test_opt(sbi, READ_EXTENT_CACHE))
2434 		seq_puts(seq, ",extent_cache");
2435 	else
2436 		seq_puts(seq, ",noextent_cache");
2437 	if (test_opt(sbi, AGE_EXTENT_CACHE))
2438 		seq_puts(seq, ",age_extent_cache");
2439 	if (test_opt(sbi, DATA_FLUSH))
2440 		seq_puts(seq, ",data_flush");
2441 
2442 	seq_puts(seq, ",mode=");
2443 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
2444 		seq_puts(seq, "adaptive");
2445 	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
2446 		seq_puts(seq, "lfs");
2447 	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
2448 		seq_puts(seq, "fragment:segment");
2449 	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2450 		seq_puts(seq, "fragment:block");
2451 	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
2452 	if (test_opt(sbi, RESERVE_ROOT) || test_opt(sbi, RESERVE_NODE))
2453 		seq_printf(seq, ",reserve_root=%u,reserve_node=%u,resuid=%u,"
2454 				"resgid=%u",
2455 				F2FS_OPTION(sbi).root_reserved_blocks,
2456 				F2FS_OPTION(sbi).root_reserved_nodes,
2457 				from_kuid_munged(&init_user_ns,
2458 					F2FS_OPTION(sbi).s_resuid),
2459 				from_kgid_munged(&init_user_ns,
2460 					F2FS_OPTION(sbi).s_resgid));
2461 #ifdef CONFIG_F2FS_FAULT_INJECTION
2462 	if (test_opt(sbi, FAULT_INJECTION)) {
2463 		seq_printf(seq, ",fault_injection=%u",
2464 				F2FS_OPTION(sbi).fault_info.inject_rate);
2465 		seq_printf(seq, ",fault_type=%u",
2466 				F2FS_OPTION(sbi).fault_info.inject_type);
2467 	}
2468 #endif
2469 #ifdef CONFIG_QUOTA
2470 	if (test_opt(sbi, QUOTA))
2471 		seq_puts(seq, ",quota");
2472 	if (test_opt(sbi, USRQUOTA))
2473 		seq_puts(seq, ",usrquota");
2474 	if (test_opt(sbi, GRPQUOTA))
2475 		seq_puts(seq, ",grpquota");
2476 	if (test_opt(sbi, PRJQUOTA))
2477 		seq_puts(seq, ",prjquota");
2478 #endif
2479 	f2fs_show_quota_options(seq, sbi->sb);
2480 
2481 	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
2482 
2483 	if (sbi->sb->s_flags & SB_INLINECRYPT)
2484 		seq_puts(seq, ",inlinecrypt");
2485 
2486 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
2487 		seq_printf(seq, ",alloc_mode=%s", "default");
2488 	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2489 		seq_printf(seq, ",alloc_mode=%s", "reuse");
2490 
2491 	if (test_opt(sbi, DISABLE_CHECKPOINT))
2492 		seq_printf(seq, ",checkpoint=disable:%u",
2493 				F2FS_OPTION(sbi).unusable_cap);
2494 	if (test_opt(sbi, MERGE_CHECKPOINT))
2495 		seq_puts(seq, ",checkpoint_merge");
2496 	else
2497 		seq_puts(seq, ",nocheckpoint_merge");
2498 	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
2499 		seq_printf(seq, ",fsync_mode=%s", "posix");
2500 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
2501 		seq_printf(seq, ",fsync_mode=%s", "strict");
2502 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
2503 		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
2504 
2505 #ifdef CONFIG_F2FS_FS_COMPRESSION
2506 	f2fs_show_compress_options(seq, sbi->sb);
2507 #endif
2508 
2509 	if (test_opt(sbi, ATGC))
2510 		seq_puts(seq, ",atgc");
2511 
2512 	if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
2513 		seq_printf(seq, ",memory=%s", "normal");
2514 	else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
2515 		seq_printf(seq, ",memory=%s", "low");
2516 
2517 	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
2518 		seq_printf(seq, ",errors=%s", "remount-ro");
2519 	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE)
2520 		seq_printf(seq, ",errors=%s", "continue");
2521 	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
2522 		seq_printf(seq, ",errors=%s", "panic");
2523 
2524 	if (test_opt(sbi, NAT_BITS))
2525 		seq_puts(seq, ",nat_bits");
2526 
2527 	if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_PERF)
2528 		seq_show_option(seq, "lookup_mode", "perf");
2529 	else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_COMPAT)
2530 		seq_show_option(seq, "lookup_mode", "compat");
2531 	else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_AUTO)
2532 		seq_show_option(seq, "lookup_mode", "auto");
2533 
2534 	return 0;
2535 }
2536 
2537 static void default_options(struct f2fs_sb_info *sbi, bool remount)
2538 {
2539 	/* init some FS parameters */
2540 	if (!remount) {
2541 		set_opt(sbi, READ_EXTENT_CACHE);
2542 		clear_opt(sbi, DISABLE_CHECKPOINT);
2543 
2544 		if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
2545 			set_opt(sbi, DISCARD);
2546 
2547 		if (f2fs_sb_has_blkzoned(sbi))
2548 			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
2549 		else
2550 			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
2551 	}
2552 
2553 	if (f2fs_sb_has_readonly(sbi))
2554 		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
2555 	else
2556 		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
2557 
2558 	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
2559 	if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <=
2560 							SMALL_VOLUME_SEGMENTS)
2561 		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
2562 	else
2563 		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
2564 	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
2565 	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
2566 	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
2567 	if (f2fs_sb_has_compression(sbi)) {
2568 		F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
2569 		F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
2570 		F2FS_OPTION(sbi).compress_ext_cnt = 0;
2571 		F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
2572 	}
2573 	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
2574 	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
2575 	F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;
2576 
2577 	set_opt(sbi, INLINE_XATTR);
2578 	set_opt(sbi, INLINE_DATA);
2579 	set_opt(sbi, INLINE_DENTRY);
2580 	set_opt(sbi, MERGE_CHECKPOINT);
2581 	set_opt(sbi, LAZYTIME);
2582 	F2FS_OPTION(sbi).unusable_cap = 0;
2583 	if (!f2fs_is_readonly(sbi))
2584 		set_opt(sbi, FLUSH_MERGE);
2585 	if (f2fs_sb_has_blkzoned(sbi))
2586 		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
2587 	else
2588 		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
2589 
2590 #ifdef CONFIG_F2FS_FS_XATTR
2591 	set_opt(sbi, XATTR_USER);
2592 #endif
2593 #ifdef CONFIG_F2FS_FS_POSIX_ACL
2594 	set_opt(sbi, POSIX_ACL);
2595 #endif
2596 
2597 	f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL);
2598 
2599 	F2FS_OPTION(sbi).lookup_mode = LOOKUP_PERF;
2600 }
2601 
2602 #ifdef CONFIG_QUOTA
2603 static int f2fs_enable_quotas(struct super_block *sb);
2604 #endif
2605 
2606 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2607 {
2608 	unsigned int s_flags = sbi->sb->s_flags;
2609 	struct cp_control cpc;
2610 	struct f2fs_lock_context lc;
2611 	unsigned int gc_mode = sbi->gc_mode;
2612 	int err = 0;
2613 	int ret;
2614 	block_t unusable;
2615 
2616 	if (s_flags & SB_RDONLY) {
2617 		f2fs_err(sbi, "checkpoint=disable on readonly fs");
2618 		return -EINVAL;
2619 	}
2620 	sbi->sb->s_flags |= SB_ACTIVE;
2621 
2622 	/* check if we need more GC first */
2623 	unusable = f2fs_get_unusable_blocks(sbi);
2624 	if (!f2fs_disable_cp_again(sbi, unusable))
2625 		goto skip_gc;
2626 
2627 	f2fs_update_time(sbi, DISABLE_TIME);
2628 
2629 	sbi->gc_mode = GC_URGENT_HIGH;
2630 
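	/*
	 * Run urgent foreground GC until there is nothing left to migrate or
	 * the DISABLE_TIME window expires.
	 */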
2631 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
2632 		struct f2fs_gc_control gc_control = {
2633 			.victim_segno = NULL_SEGNO,
2634 			.init_gc_type = FG_GC,
2635 			.should_migrate_blocks = false,
2636 			.err_gc_skipped = true,
2637 			.no_bg_gc = true,
2638 			.nr_free_secs = 1 };
2639 
2640 		f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
2641 		stat_inc_gc_call_count(sbi, FOREGROUND);
2642 		err = f2fs_gc(sbi, &gc_control);
2643 		if (err == -ENODATA) {
2644 			err = 0;
2645 			break;
2646 		}
2647 		if (err && err != -EAGAIN)
2648 			break;
2649 	}
2650 
2651 	ret = sync_filesystem(sbi->sb);
2652 	if (ret || err) {
2653 		err = ret ? ret : err;
2654 		goto restore_flag;
2655 	}
2656 
2657 	unusable = f2fs_get_unusable_blocks(sbi);
2658 	if (f2fs_disable_cp_again(sbi, unusable)) {
2659 		err = -EAGAIN;
2660 		goto restore_flag;
2661 	}
2662 
2663 skip_gc:
2664 	f2fs_down_write_trace(&sbi->gc_lock, &lc);
2665 	cpc.reason = CP_PAUSE;
2666 	set_sbi_flag(sbi, SBI_CP_DISABLED);
2667 	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2668 	err = f2fs_write_checkpoint(sbi, &cpc);
2669 	if (err)
2670 		goto out_unlock;
2671 
2672 	spin_lock(&sbi->stat_lock);
2673 	sbi->unusable_block_count = unusable;
2674 	spin_unlock(&sbi->stat_lock);
2675 
2676 out_unlock:
2677 	f2fs_up_write_trace(&sbi->gc_lock, &lc);
2678 restore_flag:
2679 	sbi->gc_mode = gc_mode;
2680 	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
2681 	f2fs_info(sbi, "f2fs_disable_checkpoint() finish, err:%d", err);
2682 	return err;
2683 }
2684 
2685 static int f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
2686 {
2687 	int retry = MAX_FLUSH_RETRY_COUNT;
2688 	long long start, writeback, end;
2689 	int ret;
2690 	struct f2fs_lock_context lc;
2691 	long long skipped_write, dirty_data;
2692 
2693 	f2fs_info(sbi, "f2fs_enable_checkpoint() starts, meta: %lld, node: %lld, data: %lld",
2694 					get_pages(sbi, F2FS_DIRTY_META),
2695 					get_pages(sbi, F2FS_DIRTY_NODES),
2696 					get_pages(sbi, F2FS_DIRTY_DATA));
2697 
2698 	start = ktime_get();
2699 
2700 	set_sbi_flag(sbi, SBI_ENABLE_CHECKPOINT);
2701 
2702 	/* we should flush all the data to keep data consistency */
2703 	do {
2704 		skipped_write = get_pages(sbi, F2FS_SKIPPED_WRITE);
2705 		dirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
2706 
2707 		sync_inodes_sb(sbi->sb);
2708 		f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
2709 
2710 		f2fs_info(sbi, "sync_inode_sb done, dirty_data: %lld, %lld, "
2711 				"skipped write: %lld, %lld, retry: %d",
2712 				get_pages(sbi, F2FS_DIRTY_DATA),
2713 				dirty_data,
2714 				get_pages(sbi, F2FS_SKIPPED_WRITE),
2715 				skipped_write, retry);
2716 
2717 		/*
2718 		 * sync_inodes_sb() has retry logic, so let's check dirty_data
2719 		 * prior to skipped_write in case there is no dirty data.
2720 		 */
2721 		if (!get_pages(sbi, F2FS_DIRTY_DATA))
2722 			break;
2723 		if (get_pages(sbi, F2FS_SKIPPED_WRITE) == skipped_write)
2724 			break;
2725 	} while (retry--);
2726 
2727 	clear_sbi_flag(sbi, SBI_ENABLE_CHECKPOINT);
2728 
2729 	writeback = ktime_get();
2730 
2731 	if (unlikely(get_pages(sbi, F2FS_DIRTY_DATA) ||
2732 			get_pages(sbi, F2FS_SKIPPED_WRITE)))
2733 		f2fs_warn(sbi, "checkpoint=enable unwritten data: %lld, skipped data: %lld, retry: %d",
2734 				get_pages(sbi, F2FS_DIRTY_DATA),
2735 				get_pages(sbi, F2FS_SKIPPED_WRITE), retry);
2736 
2737 	if (get_pages(sbi, F2FS_SKIPPED_WRITE))
2738 		atomic_set(&sbi->nr_pages[F2FS_SKIPPED_WRITE], 0);
2739 
2740 	f2fs_down_write_trace(&sbi->gc_lock, &lc);
2741 	f2fs_dirty_to_prefree(sbi);
2742 
2743 	clear_sbi_flag(sbi, SBI_CP_DISABLED);
2744 	set_sbi_flag(sbi, SBI_IS_DIRTY);
2745 	f2fs_up_write_trace(&sbi->gc_lock, &lc);
2746 
2747 	ret = f2fs_sync_fs(sbi->sb, 1);
2748 	if (ret)
2749 		f2fs_err(sbi, "%s sync_fs failed, ret: %d", __func__, ret);
2750 
2751 	/* Let's ensure there's no pending checkpoint anymore */
2752 	f2fs_flush_ckpt_thread(sbi);
2753 
2754 	end = ktime_get();
2755 
2756 	f2fs_info(sbi, "f2fs_enable_checkpoint() finishes, writeback:%llu, sync:%llu",
2757 					ktime_ms_delta(writeback, start),
2758 					ktime_ms_delta(end, writeback));
2759 	return ret;
2760 }
2761 
2762 static int __f2fs_remount(struct fs_context *fc, struct super_block *sb)
2763 {
2764 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2765 	struct f2fs_mount_info org_mount_opt;
2766 	unsigned long old_sb_flags;
2767 	unsigned int flags = fc->sb_flags;
2768 	int err;
2769 	bool need_restart_gc = false, need_stop_gc = false;
2770 	bool need_restart_flush = false, need_stop_flush = false;
2771 	bool need_restart_discard = false, need_stop_discard = false;
2772 	bool need_enable_checkpoint = false, need_disable_checkpoint = false;
2773 	bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
2774 	bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
2775 	bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
2776 	bool no_atgc = !test_opt(sbi, ATGC);
2777 	bool no_discard = !test_opt(sbi, DISCARD);
2778 	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
2779 	bool block_unit_discard = f2fs_block_unit_discard(sbi);
2780 	bool no_nat_bits = !test_opt(sbi, NAT_BITS);
2781 #ifdef CONFIG_QUOTA
2782 	int i, j;
2783 #endif
2784 
2785 	/*
2786 	 * Save the old mount options in case we
2787 	 * need to restore them.
2788 	 */
2789 	org_mount_opt = sbi->mount_opt;
2790 	old_sb_flags = sb->s_flags;
2791 
2792 	sbi->umount_lock_holder = current;
2793 
2794 #ifdef CONFIG_QUOTA
2795 	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
2796 	for (i = 0; i < MAXQUOTAS; i++) {
2797 		if (F2FS_OPTION(sbi).s_qf_names[i]) {
2798 			org_mount_opt.s_qf_names[i] =
2799 				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
2800 				GFP_KERNEL);
2801 			if (!org_mount_opt.s_qf_names[i]) {
2802 				for (j = 0; j < i; j++)
2803 					kfree(org_mount_opt.s_qf_names[j]);
2804 				return -ENOMEM;
2805 			}
2806 		} else {
2807 			org_mount_opt.s_qf_names[i] = NULL;
2808 		}
2809 	}
2810 #endif
2811 
2812 	/* recover superblocks we couldn't write due to previous RO mount */
2813 	if (!(flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
2814 		err = f2fs_commit_super(sbi, false);
2815 		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
2816 			  err);
2817 		if (!err)
2818 			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2819 	}
2820 
2821 	default_options(sbi, true);
2822 
2823 	err = f2fs_check_opt_consistency(fc, sb);
2824 	if (err)
2825 		goto restore_opts;
2826 
2827 	f2fs_apply_options(fc, sb);
2828 
2829 	err = f2fs_sanity_check_options(sbi, true);
2830 	if (err)
2831 		goto restore_opts;
2832 
2833 	/* flush outstanding errors before changing fs state */
2834 	flush_work(&sbi->s_error_work);
2835 
2836 	/*
2837 	 * Both the previous and new states of the filesystem are RO,
2838 	 * so skip checking GC and FLUSH_MERGE conditions.
2839 	 */
2840 	if (f2fs_readonly(sb) && (flags & SB_RDONLY))
2841 		goto skip;
2842 
2843 	if (f2fs_dev_is_readonly(sbi) && !(flags & SB_RDONLY)) {
2844 		err = -EROFS;
2845 		goto restore_opts;
2846 	}
2847 
2848 #ifdef CONFIG_QUOTA
2849 	if (!f2fs_readonly(sb) && (flags & SB_RDONLY)) {
2850 		err = dquot_suspend(sb, -1);
2851 		if (err < 0)
2852 			goto restore_opts;
2853 	} else if (f2fs_readonly(sb) && !(flags & SB_RDONLY)) {
2854 		/* dquot_resume needs RW */
2855 		sb->s_flags &= ~SB_RDONLY;
2856 		if (sb_any_quota_suspended(sb)) {
2857 			dquot_resume(sb, -1);
2858 		} else if (f2fs_sb_has_quota_ino(sbi)) {
2859 			err = f2fs_enable_quotas(sb);
2860 			if (err)
2861 				goto restore_opts;
2862 		}
2863 	}
2864 #endif
2865 	/* disallow enabling atgc dynamically */
2866 	if (no_atgc == !!test_opt(sbi, ATGC)) {
2867 		err = -EINVAL;
2868 		f2fs_warn(sbi, "switch atgc option is not allowed");
2869 		goto restore_opts;
2870 	}
2871 
2872 	/* disallow enabling/disabling extent_cache dynamically */
2873 	if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
2874 		err = -EINVAL;
2875 		f2fs_warn(sbi, "switch extent_cache option is not allowed");
2876 		goto restore_opts;
2877 	}
2878 	/* disallow enabling/disabling age_extent_cache dynamically */
2879 	if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
2880 		err = -EINVAL;
2881 		f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
2882 		goto restore_opts;
2883 	}
2884 
2885 	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
2886 		err = -EINVAL;
2887 		f2fs_warn(sbi, "switch compress_cache option is not allowed");
2888 		goto restore_opts;
2889 	}
2890 
2891 	if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
2892 		err = -EINVAL;
2893 		f2fs_warn(sbi, "switch discard_unit option is not allowed");
2894 		goto restore_opts;
2895 	}
2896 
2897 	if (no_nat_bits == !!test_opt(sbi, NAT_BITS)) {
2898 		err = -EINVAL;
2899 		f2fs_warn(sbi, "switch nat_bits option is not allowed");
2900 		goto restore_opts;
2901 	}
2902 
2903 	if ((flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
2904 		err = -EINVAL;
2905 		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
2906 		goto restore_opts;
2907 	}
2908 
2909 	/*
2910 	 * We stop the GC thread if FS is mounted as RO
2911 	 * or if background_gc=off is passed in the mount
2912 	 * options. Also sync the filesystem.
2913 	 */
2914 	if ((flags & SB_RDONLY) ||
2915 			(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
2916 			!test_opt(sbi, GC_MERGE))) {
2917 		if (sbi->gc_thread) {
2918 			f2fs_stop_gc_thread(sbi);
2919 			need_restart_gc = true;
2920 		}
2921 	} else if (!sbi->gc_thread) {
2922 		err = f2fs_start_gc_thread(sbi);
2923 		if (err)
2924 			goto restore_opts;
2925 		need_stop_gc = true;
2926 	}
2927 
2928 	if (flags & SB_RDONLY) {
2929 		sync_inodes_sb(sb);
2930 
2931 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2932 		set_sbi_flag(sbi, SBI_IS_CLOSE);
2933 		f2fs_sync_fs(sb, 1);
2934 		clear_sbi_flag(sbi, SBI_IS_CLOSE);
2935 	}
2936 
2937 	/*
2938 	 * We stop the issue-flush thread if FS is mounted as RO
2939 	 * or if flush_merge is not passed in the mount options.
2940 	 */
2941 	if ((flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
2942 		clear_opt(sbi, FLUSH_MERGE);
2943 		f2fs_destroy_flush_cmd_control(sbi, false);
2944 		need_restart_flush = true;
2945 	} else {
2946 		err = f2fs_create_flush_cmd_control(sbi);
2947 		if (err)
2948 			goto restore_gc;
2949 		need_stop_flush = true;
2950 	}
2951 
2952 	if (no_discard == !!test_opt(sbi, DISCARD)) {
2953 		if (test_opt(sbi, DISCARD)) {
2954 			err = f2fs_start_discard_thread(sbi);
2955 			if (err)
2956 				goto restore_flush;
2957 			need_stop_discard = true;
2958 		} else {
2959 			f2fs_stop_discard_thread(sbi);
2960 			f2fs_issue_discard_timeout(sbi);
2961 			need_restart_discard = true;
2962 		}
2963 	}
2964 
2965 	adjust_unusable_cap_perc(sbi);
2966 	if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
2967 		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2968 			err = f2fs_disable_checkpoint(sbi);
2969 			if (err)
2970 				goto restore_discard;
2971 			need_enable_checkpoint = true;
2972 		} else {
2973 			err = f2fs_enable_checkpoint(sbi);
2974 			if (err)
2975 				goto restore_discard;
2976 			need_disable_checkpoint = true;
2977 		}
2978 	}
2979 
2980 	/*
2981 	 * Place this routine at the end, since a new checkpoint would be
2982 	 * triggered during remount and we need to take care of it before
2983 	 * returning from remount.
2984 	 */
2985 	if ((flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
2986 			!test_opt(sbi, MERGE_CHECKPOINT)) {
2987 		f2fs_stop_ckpt_thread(sbi);
2988 	} else {
2989 		/* Flush the previous checkpoint, if it exists. */
2990 		f2fs_flush_ckpt_thread(sbi);
2991 
2992 		err = f2fs_start_ckpt_thread(sbi);
2993 		if (err) {
2994 			f2fs_err(sbi,
2995 			    "Failed to start F2FS issue_checkpoint_thread (%d)",
2996 			    err);
2997 			goto restore_checkpoint;
2998 		}
2999 	}
3000 
3001 skip:
3002 #ifdef CONFIG_QUOTA
3003 	/* Release old quota file names */
3004 	for (i = 0; i < MAXQUOTAS; i++)
3005 		kfree(org_mount_opt.s_qf_names[i]);
3006 #endif
3007 	/* Update the POSIXACL Flag */
3008 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
3009 		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
3010 
3011 	limit_reserve_root(sbi);
3012 	fc->sb_flags = (flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
3013 
3014 	sbi->umount_lock_holder = NULL;
3015 	return 0;
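/* Error paths: undo the steps above in reverse order of their setup. */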
3016 restore_checkpoint:
3017 	if (need_enable_checkpoint) {
3018 		if (f2fs_enable_checkpoint(sbi))
3019 			f2fs_warn(sbi, "checkpoint has not been enabled");
3020 	} else if (need_disable_checkpoint) {
3021 		if (f2fs_disable_checkpoint(sbi))
3022 			f2fs_warn(sbi, "checkpoint has not been disabled");
3023 	}
3024 restore_discard:
3025 	if (need_restart_discard) {
3026 		if (f2fs_start_discard_thread(sbi))
3027 			f2fs_warn(sbi, "discard has been stopped");
3028 	} else if (need_stop_discard) {
3029 		f2fs_stop_discard_thread(sbi);
3030 	}
3031 restore_flush:
3032 	if (need_restart_flush) {
3033 		if (f2fs_create_flush_cmd_control(sbi))
3034 			f2fs_warn(sbi, "background flush thread has stopped");
3035 	} else if (need_stop_flush) {
3036 		clear_opt(sbi, FLUSH_MERGE);
3037 		f2fs_destroy_flush_cmd_control(sbi, false);
3038 	}
3039 restore_gc:
3040 	if (need_restart_gc) {
3041 		if (f2fs_start_gc_thread(sbi))
3042 			f2fs_warn(sbi, "background gc thread has stopped");
3043 	} else if (need_stop_gc) {
3044 		f2fs_stop_gc_thread(sbi);
3045 	}
3046 restore_opts:
3047 #ifdef CONFIG_QUOTA
3048 	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
3049 	for (i = 0; i < MAXQUOTAS; i++) {
3050 		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
3051 		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
3052 	}
3053 #endif
3054 	sbi->mount_opt = org_mount_opt;
3055 	sb->s_flags = old_sb_flags;
3056 
3057 	sbi->umount_lock_holder = NULL;
3058 	return err;
3059 }
3060 
3061 static void f2fs_shutdown(struct super_block *sb)
3062 {
3063 	f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
3064 }
3065 
3066 #ifdef CONFIG_QUOTA
3067 static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
3068 {
3069 	/* need to recover orphan inodes */
3070 	if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
3071 		return true;
3072 	/* need to recover data */
3073 	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
3074 		return false;
3075 	if (test_opt(sbi, NORECOVERY))
3076 		return false;
3077 	return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG);
3078 }
3079 
3080 static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi)
3081 {
3082 	bool readonly = f2fs_readonly(sbi->sb);
3083 
3084 	if (!f2fs_need_recovery(sbi))
3085 		return false;
3086 
3087 	/* no need to check f2fs_sb_has_readonly() */
3088 	if (f2fs_hw_is_readonly(sbi))
3089 		return false;
3090 
3091 	if (readonly) {
3092 		sbi->sb->s_flags &= ~SB_RDONLY;
3093 		set_sbi_flag(sbi, SBI_IS_WRITABLE);
3094 	}
3095 
3096 	/*
3097 	 * Turn on quotas which were not enabled for read-only mounts if the
3098 	 * filesystem has the quota feature, so that they are updated correctly.
3099 	 */
3100 	return f2fs_enable_quota_files(sbi, readonly);
3101 }
3102 
3103 static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi,
3104 						bool quota_enabled)
3105 {
3106 	if (quota_enabled)
3107 		f2fs_quota_off_umount(sbi->sb);
3108 
3109 	if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) {
3110 		clear_sbi_flag(sbi, SBI_IS_WRITABLE);
3111 		sbi->sb->s_flags |= SB_RDONLY;
3112 	}
3113 }
3114 
3115 /* Read data from quotafile */
3116 static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
3117 			       size_t len, loff_t off)
3118 {
3119 	struct inode *inode = sb_dqopt(sb)->files[type];
3120 	struct address_space *mapping = inode->i_mapping;
3121 	int tocopy;
3122 	size_t toread;
3123 	loff_t i_size = i_size_read(inode);
3124 
3125 	if (off > i_size)
3126 		return 0;
3127 
3128 	if (off + len > i_size)
3129 		len = i_size - off;
3130 	toread = len;
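	/* Read the range folio by folio, retrying on transient -ENOMEM. */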
3131 	while (toread > 0) {
3132 		struct folio *folio;
3133 		size_t offset;
3134 
3135 repeat:
3136 		folio = mapping_read_folio_gfp(mapping, off >> PAGE_SHIFT,
3137 				GFP_NOFS);
3138 		if (IS_ERR(folio)) {
3139 			if (PTR_ERR(folio) == -ENOMEM) {
3140 				memalloc_retry_wait(GFP_NOFS);
3141 				goto repeat;
3142 			}
3143 			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
3144 			return PTR_ERR(folio);
3145 		}
3146 		offset = offset_in_folio(folio, off);
3147 		tocopy = min(folio_size(folio) - offset, toread);
3148 
3149 		folio_lock(folio);
3150 
3151 		if (unlikely(folio->mapping != mapping)) {
3152 			f2fs_folio_put(folio, true);
3153 			goto repeat;
3154 		}
3155 
3156 		/*
3157 		 * should never happen, just leave f2fs_bug_on() here to catch
3158 		 * any potential bug.
3159 		 */
3160 		f2fs_bug_on(F2FS_SB(sb), !folio_test_uptodate(folio));
3161 
3162 		memcpy_from_folio(data, folio, offset, tocopy);
3163 		f2fs_folio_put(folio, true);
3164 
3165 		toread -= tocopy;
3166 		data += tocopy;
3167 		off += tocopy;
3168 	}
3169 	return len;
3170 }
3171 
3172 /* Write to quotafile */
3173 static ssize_t f2fs_quota_write(struct super_block *sb, int type,
3174 				const char *data, size_t len, loff_t off)
3175 {
3176 	struct inode *inode = sb_dqopt(sb)->files[type];
3177 	struct address_space *mapping = inode->i_mapping;
3178 	const struct address_space_operations *a_ops = mapping->a_ops;
3179 	int offset = off & (sb->s_blocksize - 1);
3180 	size_t towrite = len;
3181 	struct folio *folio;
3182 	void *fsdata = NULL;
3183 	int err = 0;
3184 	int tocopy;
3185 
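	/* Copy the data block by block through the pagecache write path. */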
3186 	while (towrite > 0) {
3187 		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
3188 								towrite);
3189 retry:
3190 		err = a_ops->write_begin(NULL, mapping, off, tocopy,
3191 							&folio, &fsdata);
3192 		if (unlikely(err)) {
3193 			if (err == -ENOMEM) {
3194 				memalloc_retry_wait(GFP_NOFS);
3195 				goto retry;
3196 			}
3197 			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
3198 			break;
3199 		}
3200 
3201 		memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy);
3202 
3203 		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
3204 						folio, fsdata);
3205 		offset = 0;
3206 		towrite -= tocopy;
3207 		off += tocopy;
3208 		data += tocopy;
3209 		cond_resched();
3210 	}
3211 
3212 	if (len == towrite)
3213 		return err;
3214 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
3215 	f2fs_mark_inode_dirty_sync(inode, false);
3216 	return len - towrite;
3217 }
3218 
3219 int f2fs_dquot_initialize(struct inode *inode)
3220 {
3221 	if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
3222 		return -ESRCH;
3223 
3224 	return dquot_initialize(inode);
3225 }
3226 
3227 static struct dquot __rcu **f2fs_get_dquots(struct inode *inode)
3228 {
3229 	return F2FS_I(inode)->i_dquot;
3230 }
3231 
3232 static qsize_t *f2fs_get_reserved_space(struct inode *inode)
3233 {
3234 	return &F2FS_I(inode)->i_reserved_quota;
3235 }
3236 
3237 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
3238 {
3239 	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
3240 		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
3241 		return 0;
3242 	}
3243 
3244 	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
3245 					F2FS_OPTION(sbi).s_jquota_fmt, type);
3246 }
3247 
3248 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
3249 {
3250 	int enabled = 0;
3251 	int i, err;
3252 
3253 	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
3254 		err = f2fs_enable_quotas(sbi->sb);
3255 		if (err) {
3256 			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
3257 			return 0;
3258 		}
3259 		return 1;
3260 	}
3261 
3262 	for (i = 0; i < MAXQUOTAS; i++) {
3263 		if (F2FS_OPTION(sbi).s_qf_names[i]) {
3264 			err = f2fs_quota_on_mount(sbi, i);
3265 			if (!err) {
3266 				enabled = 1;
3267 				continue;
3268 			}
3269 			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
3270 				 err, i);
3271 		}
3272 	}
3273 	return enabled;
3274 }
3275 
3276 static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
3277 			     unsigned int flags, unsigned long qf_inum)
3278 {
3279 	struct inode *qf_inode;
3280 	unsigned long qf_flag = F2FS_QUOTA_DEFAULT_FL;
3281 	int err;
3282 
3283 	qf_inode = f2fs_iget(sb, qf_inum);
3284 	if (IS_ERR(qf_inode)) {
3285 		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
3286 		return PTR_ERR(qf_inode);
3287 	}
3288 
3289 	/* Don't account quota for quota files to avoid recursion */
3290 	inode_lock(qf_inode);
3291 	qf_inode->i_flags |= S_NOQUOTA;
3292 
3293 	if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) {
3294 		F2FS_I(qf_inode)->i_flags |= qf_flag;
3295 		f2fs_set_inode_flags(qf_inode);
3296 	}
3297 	inode_unlock(qf_inode);
3298 
3299 	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
3300 	iput(qf_inode);
3301 	return err;
3302 }
3303 
3304 static int f2fs_enable_quotas(struct super_block *sb)
3305 {
3306 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3307 	int type, err = 0;
3308 	unsigned long qf_inum;
3309 	bool quota_mopt[MAXQUOTAS] = {
3310 		test_opt(sbi, USRQUOTA),
3311 		test_opt(sbi, GRPQUOTA),
3312 		test_opt(sbi, PRJQUOTA),
3313 	};
3314 
3315 	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
3316 		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
3317 		return 0;
3318 	}
3319 
3320 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
3321 
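	/*
	 * Quota files here are hidden system inodes: usage tracking is always
	 * enabled for each type that has a quota inode, while limits are only
	 * enabled for types requested via mount options.
	 */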
3322 	for (type = 0; type < MAXQUOTAS; type++) {
3323 		qf_inum = f2fs_qf_ino(sb, type);
3324 		if (qf_inum) {
3325 			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
3326 				DQUOT_USAGE_ENABLED |
3327 				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0), qf_inum);
3328 			if (err) {
3329 				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
3330 					 type, err);
3331 				for (type--; type >= 0; type--)
3332 					dquot_quota_off(sb, type);
3333 				set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3334 				return err;
3335 			}
3336 		}
3337 	}
3338 	return 0;
3339 }
3340 
3341 static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
3342 {
3343 	struct quota_info *dqopt = sb_dqopt(sbi->sb);
3344 	struct address_space *mapping = dqopt->files[type]->i_mapping;
3345 	int ret = 0;
3346 
3347 	ret = dquot_writeback_dquots(sbi->sb, type);
3348 	if (ret)
3349 		goto out;
3350 
3351 	ret = filemap_fdatawrite(mapping);
3352 	if (ret)
3353 		goto out;
3354 
3355 	/* if we are using journalled quota */
3356 	if (is_journalled_quota(sbi))
3357 		goto out;
3358 
3359 	ret = filemap_fdatawait(mapping);
3360 
3361 	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
3362 out:
3363 	if (ret)
3364 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3365 	return ret;
3366 }
3367 
3368 int f2fs_do_quota_sync(struct super_block *sb, int type)
3369 {
3370 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3371 	struct quota_info *dqopt = sb_dqopt(sb);
3372 	int cnt;
3373 	int ret = 0;
3374 
3375 	/*
3376 	 * Now when everything is written we can discard the pagecache so
3377 	 * that userspace sees the changes.
3378 	 */
3379 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
3380 		struct f2fs_lock_context lc;
3381 
3382 		if (type != -1 && cnt != type)
3383 			continue;
3384 
3385 		if (!sb_has_quota_active(sb, cnt))
3386 			continue;
3387 
3388 		if (!f2fs_sb_has_quota_ino(sbi))
3389 			inode_lock(dqopt->files[cnt]);
3390 
3391 		/*
3392 		 * do_quotactl
3393 		 *  f2fs_quota_sync
3394 		 *  f2fs_down_read(quota_sem)
3395 		 *  dquot_writeback_dquots()
3396 		 *  f2fs_dquot_commit
3397 		 *			      block_operation
3398 		 *			      f2fs_down_read(quota_sem)
3399 		 */
3400 		f2fs_lock_op(sbi, &lc);
3401 		f2fs_down_read(&sbi->quota_sem);
3402 
3403 		ret = f2fs_quota_sync_file(sbi, cnt);
3404 
3405 		f2fs_up_read(&sbi->quota_sem);
3406 		f2fs_unlock_op(sbi, &lc);
3407 
3408 		if (!f2fs_sb_has_quota_ino(sbi))
3409 			inode_unlock(dqopt->files[cnt]);
3410 
3411 		if (ret)
3412 			break;
3413 	}
3414 	return ret;
3415 }
3416 
3417 static int f2fs_quota_sync(struct super_block *sb, int type)
3418 {
3419 	int ret;
3420 
3421 	F2FS_SB(sb)->umount_lock_holder = current;
3422 	ret = f2fs_do_quota_sync(sb, type);
3423 	F2FS_SB(sb)->umount_lock_holder = NULL;
3424 	return ret;
3425 }
3426 
3427 static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
3428 							const struct path *path)
3429 {
3430 	struct inode *inode;
3431 	int err = 0;
3432 
3433 	/* if the quota sysfile exists, deny enabling quota with a specific file */
3434 	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
3435 		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
3436 		return -EBUSY;
3437 	}
3438 
3439 	if (path->dentry->d_sb != sb)
3440 		return -EXDEV;
3441 
3442 	F2FS_SB(sb)->umount_lock_holder = current;
3443 
3444 	err = f2fs_do_quota_sync(sb, type);
3445 	if (err)
3446 		goto out;
3447 
3448 	inode = d_inode(path->dentry);
3449 
3450 	err = filemap_fdatawrite(inode->i_mapping);
3451 	if (err)
3452 		goto out;
3453 
3454 	err = filemap_fdatawait(inode->i_mapping);
3455 	if (err)
3456 		goto out;
3457 
3458 	err = dquot_quota_on(sb, type, format_id, path);
3459 	if (err)
3460 		goto out;
3461 
3462 	inode_lock(inode);
3463 	F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
3464 	f2fs_set_inode_flags(inode);
3465 	inode_unlock(inode);
3466 	f2fs_mark_inode_dirty_sync(inode, false);
3467 out:
3468 	F2FS_SB(sb)->umount_lock_holder = NULL;
3469 	return err;
3470 }
3471 
3472 static int __f2fs_quota_off(struct super_block *sb, int type)
3473 {
3474 	struct inode *inode = sb_dqopt(sb)->files[type];
3475 	int err;
3476 
3477 	if (!inode || !igrab(inode))
3478 		return dquot_quota_off(sb, type);
3479 
3480 	err = f2fs_do_quota_sync(sb, type);
3481 	if (err)
3482 		goto out_put;
3483 
3484 	err = dquot_quota_off(sb, type);
3485 	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
3486 		goto out_put;
3487 
3488 	inode_lock(inode);
3489 	F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL;
3490 	f2fs_set_inode_flags(inode);
3491 	inode_unlock(inode);
3492 	f2fs_mark_inode_dirty_sync(inode, false);
3493 out_put:
3494 	iput(inode);
3495 	return err;
3496 }
3497 
3498 static int f2fs_quota_off(struct super_block *sb, int type)
3499 {
3500 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3501 	int err;
3502 
3503 	F2FS_SB(sb)->umount_lock_holder = current;
3504 
3505 	err = __f2fs_quota_off(sb, type);
3506 
3507 	/*
3508 	 * quotactl can shut down journalled quota, which can leave quota
3509 	 * records inconsistent with fs data due to subsequent updates, so tag
3510 	 * the flag to let fsck be aware of it.
3511 	 */
3512 	if (is_journalled_quota(sbi))
3513 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3514 
3515 	F2FS_SB(sb)->umount_lock_holder = NULL;
3516 
3517 	return err;
3518 }
3519 
3520 void f2fs_quota_off_umount(struct super_block *sb)
3521 {
3522 	int type;
3523 	int err;
3524 
3525 	for (type = 0; type < MAXQUOTAS; type++) {
3526 		err = __f2fs_quota_off(sb, type);
3527 		if (err) {
3528 			int ret = dquot_quota_off(sb, type);
3529 
3530 			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
3531 				 type, err, ret);
3532 			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
3533 		}
3534 	}
3535 	/*
3536 	 * In case of checkpoint=disable, we must flush quota blocks here;
3537 	 * otherwise a later writeback could hit a NULL node_inode in end_io,
3538 	 * since put_super will already have dropped it.
3539 	 */
3540 	sync_filesystem(sb);
3541 }
3542 
3543 static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
3544 {
3545 	struct quota_info *dqopt = sb_dqopt(sb);
3546 	int type;
3547 
3548 	for (type = 0; type < MAXQUOTAS; type++) {
3549 		if (!dqopt->files[type])
3550 			continue;
3551 		f2fs_inode_synced(dqopt->files[type]);
3552 	}
3553 }
3554 
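/*
 * dquot callbacks: wrap the generic quota helpers, taking quota_sem where
 * needed, and flag the superblock (SBI_QUOTA_NEED_REPAIR / _NEED_FLUSH)
 * when quota state may have become inconsistent.
 */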
3555 static int f2fs_dquot_commit(struct dquot *dquot)
3556 {
3557 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3558 	int ret;
3559 
3560 	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
3561 	ret = dquot_commit(dquot);
3562 	if (ret < 0)
3563 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3564 	f2fs_up_read(&sbi->quota_sem);
3565 	return ret;
3566 }
3567 
3568 static int f2fs_dquot_acquire(struct dquot *dquot)
3569 {
3570 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3571 	int ret;
3572 
3573 	f2fs_down_read(&sbi->quota_sem);
3574 	ret = dquot_acquire(dquot);
3575 	if (ret < 0)
3576 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3577 	f2fs_up_read(&sbi->quota_sem);
3578 	return ret;
3579 }
3580 
3581 static int f2fs_dquot_release(struct dquot *dquot)
3582 {
3583 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3584 	int ret = dquot_release(dquot);
3585 
3586 	if (ret < 0)
3587 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3588 	return ret;
3589 }
3590 
3591 static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
3592 {
3593 	struct super_block *sb = dquot->dq_sb;
3594 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3595 	int ret = dquot_mark_dquot_dirty(dquot);
3596 
3597 	/* if we are using journalled quota */
3598 	if (is_journalled_quota(sbi))
3599 		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
3600 
3601 	return ret;
3602 }
3603 
3604 static int f2fs_dquot_commit_info(struct super_block *sb, int type)
3605 {
3606 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3607 	int ret = dquot_commit_info(sb, type);
3608 
3609 	if (ret < 0)
3610 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3611 	return ret;
3612 }
3613 
3614 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
3615 {
3616 	*projid = F2FS_I(inode)->i_projid;
3617 	return 0;
3618 }
3619 
3620 static const struct dquot_operations f2fs_quota_operations = {
3621 	.get_reserved_space = f2fs_get_reserved_space,
3622 	.write_dquot	= f2fs_dquot_commit,
3623 	.acquire_dquot	= f2fs_dquot_acquire,
3624 	.release_dquot	= f2fs_dquot_release,
3625 	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
3626 	.write_info	= f2fs_dquot_commit_info,
3627 	.alloc_dquot	= dquot_alloc,
3628 	.destroy_dquot	= dquot_destroy,
3629 	.get_projid	= f2fs_get_projid,
3630 	.get_next_id	= dquot_get_next_id,
3631 };
3632 
3633 static const struct quotactl_ops f2fs_quotactl_ops = {
3634 	.quota_on	= f2fs_quota_on,
3635 	.quota_off	= f2fs_quota_off,
3636 	.quota_sync	= f2fs_quota_sync,
3637 	.get_state	= dquot_get_state,
3638 	.set_info	= dquot_set_dqinfo,
3639 	.get_dqblk	= dquot_get_dqblk,
3640 	.set_dqblk	= dquot_set_dqblk,
3641 	.get_nextdqblk	= dquot_get_next_dqblk,
3642 };
3643 #else
3644 int f2fs_dquot_initialize(struct inode *inode)
3645 {
3646 	return 0;
3647 }
3648 
3649 int f2fs_do_quota_sync(struct super_block *sb, int type)
3650 {
3651 	return 0;
3652 }
3653 
3654 void f2fs_quota_off_umount(struct super_block *sb)
3655 {
3656 }
3657 #endif
3658 
3659 static const struct super_operations f2fs_sops = {
3660 	.alloc_inode	= f2fs_alloc_inode,
3661 	.free_inode	= f2fs_free_inode,
3662 	.drop_inode	= f2fs_drop_inode,
3663 	.write_inode	= f2fs_write_inode,
3664 	.dirty_inode	= f2fs_dirty_inode,
3665 	.show_options	= f2fs_show_options,
3666 #ifdef CONFIG_QUOTA
3667 	.quota_read	= f2fs_quota_read,
3668 	.quota_write	= f2fs_quota_write,
3669 	.get_dquots	= f2fs_get_dquots,
3670 #endif
3671 	.evict_inode	= f2fs_evict_inode,
3672 	.put_super	= f2fs_put_super,
3673 	.sync_fs	= f2fs_sync_fs,
3674 	.freeze_fs	= f2fs_freeze,
3675 	.unfreeze_fs	= f2fs_unfreeze,
3676 	.statfs		= f2fs_statfs,
3677 	.shutdown	= f2fs_shutdown,
3678 };
3679 
3680 #ifdef CONFIG_FS_ENCRYPTION
3681 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
3682 {
3683 	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3684 				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3685 				ctx, len, NULL);
3686 }
3687 
3688 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
3689 							void *fs_data)
3690 {
3691 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3692 
3693 	/*
3694 	 * Encrypting the root directory is not allowed because fsck
3695 	 * expects the lost+found directory to exist and to remain
3696 	 * unencrypted whenever the LOST_FOUND feature is enabled,
3697 	 * since that directory lives directly under the root.
3698 	 */
3699 	if (f2fs_sb_has_lost_found(sbi) &&
3700 			inode->i_ino == F2FS_ROOT_INO(sbi))
3701 		return -EPERM;
3702 
3703 	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3704 				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3705 				ctx, len, fs_data, XATTR_CREATE);
3706 }
3707 
3708 static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
3709 {
3710 	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
3711 }
3712 
3713 static bool f2fs_has_stable_inodes(struct super_block *sb)
3714 {
3715 	return true;
3716 }
3717 
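/*
 * fscrypt ->get_devices() hook: return an array with every block device
 * backing a multi-device f2fs, or NULL for a single-device filesystem.
 */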
3718 static struct block_device **f2fs_get_devices(struct super_block *sb,
3719 					      unsigned int *num_devs)
3720 {
3721 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3722 	struct block_device **devs;
3723 	int i;
3724 
3725 	if (!f2fs_is_multi_device(sbi))
3726 		return NULL;
3727 
3728 	devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL);
3729 	if (!devs)
3730 		return ERR_PTR(-ENOMEM);
3731 
3732 	for (i = 0; i < sbi->s_ndevs; i++)
3733 		devs[i] = FDEV(i).bdev;
3734 	*num_devs = sbi->s_ndevs;
3735 	return devs;
3736 }
3737 
3738 static const struct fscrypt_operations f2fs_cryptops = {
3739 	.inode_info_offs	= (int)offsetof(struct f2fs_inode_info, i_crypt_info) -
3740 				  (int)offsetof(struct f2fs_inode_info, vfs_inode),
3741 	.needs_bounce_pages	= 1,
3742 	.has_32bit_inodes	= 1,
3743 	.supports_subblock_data_units = 1,
3744 	.legacy_key_prefix	= "f2fs:",
3745 	.get_context		= f2fs_get_context,
3746 	.set_context		= f2fs_set_context,
3747 	.get_dummy_policy	= f2fs_get_dummy_policy,
3748 	.empty_dir		= f2fs_empty_dir,
3749 	.has_stable_inodes	= f2fs_has_stable_inodes,
3750 	.get_devices		= f2fs_get_devices,
3751 };
3752 #endif /* CONFIG_FS_ENCRYPTION */
3753 
3754 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
3755 		u64 ino, u32 generation)
3756 {
3757 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3758 	struct inode *inode;
3759 
3760 	if (f2fs_check_nid_range(sbi, ino))
3761 		return ERR_PTR(-ESTALE);
3762 
3763 	/*
3764 	 * f2fs_iget isn't quite right if the inode is currently unallocated!
3765 	 * However f2fs_iget currently does appropriate checks to handle stale
3766 	 * inodes so everything is OK.
3767 	 */
3768 	inode = f2fs_iget(sb, ino);
3769 	if (IS_ERR(inode))
3770 		return ERR_CAST(inode);
3771 	if (unlikely(generation && inode->i_generation != generation)) {
3772 		/* we didn't find the right inode.. */
3773 		iput(inode);
3774 		return ERR_PTR(-ESTALE);
3775 	}
3776 	return inode;
3777 }
3778 
3779 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
3780 		int fh_len, int fh_type)
3781 {
3782 	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
3783 				    f2fs_nfs_get_inode);
3784 }
3785 
3786 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
3787 		int fh_len, int fh_type)
3788 {
3789 	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
3790 				    f2fs_nfs_get_inode);
3791 }
3792 
3793 static const struct export_operations f2fs_export_ops = {
3794 	.encode_fh = generic_encode_ino32_fh,
3795 	.fh_to_dentry = f2fs_fh_to_dentry,
3796 	.fh_to_parent = f2fs_fh_to_parent,
3797 	.get_parent = f2fs_get_parent,
3798 };
3799 
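/*
 * Compute the maximum file size in blocks reachable through the two direct,
 * two indirect and one double-indirect node blocks, capped so that
 * IV_INO_LBLK_{64,32} encryption policies with 4K data units still work.
 */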
3800 loff_t max_file_blocks(struct inode *inode)
3801 {
3802 	loff_t result = 0;
3803 	loff_t leaf_count;
3804 
3805 	/*
3806 	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
3807 	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
3808 	 * space in inode.i_addr, so it is safer to start with
3809 	 * result as zero.
3810 	 */
3811 
3812 	if (inode && f2fs_compressed_file(inode))
3813 		leaf_count = ADDRS_PER_BLOCK(inode);
3814 	else
3815 		leaf_count = DEF_ADDRS_PER_BLOCK;
3816 
3817 	/* two direct node blocks */
3818 	result += (leaf_count * 2);
3819 
3820 	/* two indirect node blocks */
3821 	leaf_count *= NIDS_PER_BLOCK;
3822 	result += (leaf_count * 2);
3823 
3824 	/* one double indirect node block */
3825 	leaf_count *= NIDS_PER_BLOCK;
3826 	result += leaf_count;
3827 
3828 	/*
3829 	 * For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
3830 	 * a 4K crypto data unit, we must restrict the max filesize to what can
3831 	 * fit within U32_MAX + 1 data units.
3832 	 */
3833 
3834 	result = umin(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
3835 
3836 	return result;
3837 }
3838 
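/*
 * Synchronously write one superblock copy at @index with preflush + FUA;
 * when @update is set, refresh the folio from the in-memory super block
 * before issuing the write.
 */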
3839 static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
3840 						pgoff_t index, bool update)
3841 {
3842 	struct bio *bio;
3843 	/* this is a rare path, so we can afford to use FUA every time */
3844 	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
3845 	int ret;
3846 
3847 	folio_lock(folio);
3848 	folio_wait_writeback(folio);
3849 	if (update)
3850 		memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi),
3851 					sizeof(struct f2fs_super_block));
3852 	folio_mark_dirty(folio);
3853 	folio_clear_dirty_for_io(folio);
3854 	folio_start_writeback(folio);
3855 	folio_unlock(folio);
3856 
3857 	bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);
3858 
3859 	/* no crypto context needs to be set for a superblock update */
3860 	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio->index);
3861 
3862 	if (!bio_add_folio(bio, folio, folio_size(folio), 0))
3863 		f2fs_bug_on(sbi, 1);
3864 
3865 	ret = submit_bio_wait(bio);
3866 	bio_put(bio);
3867 	folio_end_writeback(folio);
3868 
3869 	return ret;
3870 }
3871 
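/*
 * Verify that the CP, SIT, NAT, SSA and MAIN areas described in the raw
 * super block are laid out back to back. Returns true on an inconsistent
 * layout; a too-large segment_count is fixed up instead of rejected.
 */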
3872 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
3873 					struct folio *folio, pgoff_t index)
3874 {
3875 	struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
3876 	struct super_block *sb = sbi->sb;
3877 	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3878 	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
3879 	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
3880 	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
3881 	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
3882 	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3883 	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
3884 	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
3885 	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
3886 	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
3887 	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3888 	u32 segment_count = le32_to_cpu(raw_super->segment_count);
3889 	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3890 	u64 main_end_blkaddr = main_blkaddr +
3891 				((u64)segment_count_main << log_blocks_per_seg);
3892 	u64 seg_end_blkaddr = segment0_blkaddr +
3893 				((u64)segment_count << log_blocks_per_seg);
3894 
3895 	if (segment0_blkaddr != cp_blkaddr) {
3896 		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
3897 			  segment0_blkaddr, cp_blkaddr);
3898 		return true;
3899 	}
3900 
3901 	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
3902 							sit_blkaddr) {
3903 		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
3904 			  cp_blkaddr, sit_blkaddr,
3905 			  segment_count_ckpt << log_blocks_per_seg);
3906 		return true;
3907 	}
3908 
3909 	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
3910 							nat_blkaddr) {
3911 		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
3912 			  sit_blkaddr, nat_blkaddr,
3913 			  segment_count_sit << log_blocks_per_seg);
3914 		return true;
3915 	}
3916 
3917 	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
3918 							ssa_blkaddr) {
3919 		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
3920 			  nat_blkaddr, ssa_blkaddr,
3921 			  segment_count_nat << log_blocks_per_seg);
3922 		return true;
3923 	}
3924 
3925 	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
3926 							main_blkaddr) {
3927 		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
3928 			  ssa_blkaddr, main_blkaddr,
3929 			  segment_count_ssa << log_blocks_per_seg);
3930 		return true;
3931 	}
3932 
3933 	if (main_end_blkaddr > seg_end_blkaddr) {
3934 		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
3935 			  main_blkaddr, seg_end_blkaddr,
3936 			  segment_count_main << log_blocks_per_seg);
3937 		return true;
3938 	} else if (main_end_blkaddr < seg_end_blkaddr) {
3939 		int err = 0;
3940 		char *res;
3941 
3942 		/* fix in-memory information all the time */
3943 		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
3944 				segment0_blkaddr) >> log_blocks_per_seg);
3945 
3946 		if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) {
3947 			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3948 			res = "internally";
3949 		} else {
3950 			err = __f2fs_commit_super(sbi, folio, index, false);
3951 			res = err ? "failed" : "done";
3952 		}
3953 		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
3954 			  res, main_blkaddr, seg_end_blkaddr,
3955 			  segment_count_main << log_blocks_per_seg);
3956 		if (err)
3957 			return true;
3958 	}
3959 	return false;
3960 }
3961 
3962 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
3963 					struct folio *folio, pgoff_t index)
3964 {
3965 	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
3966 	block_t total_sections, blocks_per_seg;
3967 	struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
3968 	size_t crc_offset = 0;
3969 	__u32 crc = 0;
3970 
3971 	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
3972 		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
3973 			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
3974 		return -EINVAL;
3975 	}
3976 
3977 	/* Check checksum_offset and crc in superblock */
3978 	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
3979 		crc_offset = le32_to_cpu(raw_super->checksum_offset);
3980 		if (crc_offset !=
3981 			offsetof(struct f2fs_super_block, crc)) {
3982 			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
3983 				  crc_offset);
3984 			return -EFSCORRUPTED;
3985 		}
3986 		crc = le32_to_cpu(raw_super->crc);
3987 		if (crc != f2fs_crc32(raw_super, crc_offset)) {
3988 			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
3989 			return -EFSCORRUPTED;
3990 		}
3991 	}
3992 
3993 	/* only a block size equal to PAGE_SIZE is supported */
3994 	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
3995 		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
3996 			  le32_to_cpu(raw_super->log_blocksize),
3997 			  F2FS_BLKSIZE_BITS);
3998 		return -EFSCORRUPTED;
3999 	}
4000 
4001 	/* check log blocks per segment */
4002 	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
4003 		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
4004 			  le32_to_cpu(raw_super->log_blocks_per_seg));
4005 		return -EFSCORRUPTED;
4006 	}
4007 
4008 	/* Currently, only 512/1024/2048/4096/16K-byte sector sizes are supported */
4009 	if (le32_to_cpu(raw_super->log_sectorsize) >
4010 				F2FS_MAX_LOG_SECTOR_SIZE ||
4011 		le32_to_cpu(raw_super->log_sectorsize) <
4012 				F2FS_MIN_LOG_SECTOR_SIZE) {
4013 		f2fs_info(sbi, "Invalid log sectorsize (%u)",
4014 			  le32_to_cpu(raw_super->log_sectorsize));
4015 		return -EFSCORRUPTED;
4016 	}
4017 	if (le32_to_cpu(raw_super->log_sectors_per_block) +
4018 		le32_to_cpu(raw_super->log_sectorsize) !=
4019 			F2FS_MAX_LOG_SECTOR_SIZE) {
4020 		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
4021 			  le32_to_cpu(raw_super->log_sectors_per_block),
4022 			  le32_to_cpu(raw_super->log_sectorsize));
4023 		return -EFSCORRUPTED;
4024 	}
4025 
4026 	segment_count = le32_to_cpu(raw_super->segment_count);
4027 	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
4028 	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
4029 	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
4030 	total_sections = le32_to_cpu(raw_super->section_count);
4031 
4032 	/* blocks_per_seg should be 512, given the above check */
4033 	blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg));
4034 
4035 	if (segment_count > F2FS_MAX_SEGMENT ||
4036 				segment_count < F2FS_MIN_SEGMENTS) {
4037 		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
4038 		return -EFSCORRUPTED;
4039 	}
4040 
4041 	if (total_sections > segment_count_main || total_sections < 1 ||
4042 			segs_per_sec > segment_count || !segs_per_sec) {
4043 		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
4044 			  segment_count, total_sections, segs_per_sec);
4045 		return -EFSCORRUPTED;
4046 	}
4047 
4048 	if (segment_count_main != total_sections * segs_per_sec) {
4049 		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
4050 			  segment_count_main, total_sections, segs_per_sec);
4051 		return -EFSCORRUPTED;
4052 	}
4053 
4054 	if ((segment_count / segs_per_sec) < total_sections) {
4055 		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
4056 			  segment_count, segs_per_sec, total_sections);
4057 		return -EFSCORRUPTED;
4058 	}
4059 
4060 	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
4061 		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
4062 			  segment_count, le64_to_cpu(raw_super->block_count));
4063 		return -EFSCORRUPTED;
4064 	}
4065 
4066 	if (RDEV(0).path[0]) {
4067 		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
4068 		int i = 1;
4069 
4070 		while (i < MAX_DEVICES && RDEV(i).path[0]) {
4071 			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
4072 			i++;
4073 		}
4074 		if (segment_count != dev_seg_count) {
4075 			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
4076 					segment_count, dev_seg_count);
4077 			return -EFSCORRUPTED;
4078 		}
4079 	} else {
4080 		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
4081 					!bdev_is_zoned(sbi->sb->s_bdev)) {
4082 			f2fs_info(sbi, "Zoned block device path is missing");
4083 			return -EFSCORRUPTED;
4084 		}
4085 	}
4086 
4087 	if (secs_per_zone > total_sections || !secs_per_zone) {
4088 		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
4089 			  secs_per_zone, total_sections);
4090 		return -EFSCORRUPTED;
4091 	}
4092 	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
4093 			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
4094 			(le32_to_cpu(raw_super->extension_count) +
4095 			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
4096 		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
4097 			  le32_to_cpu(raw_super->extension_count),
4098 			  raw_super->hot_ext_count,
4099 			  F2FS_MAX_EXTENSION);
4100 		return -EFSCORRUPTED;
4101 	}
4102 
4103 	if (le32_to_cpu(raw_super->cp_payload) >=
4104 				(blocks_per_seg - F2FS_CP_PACKS -
4105 				NR_CURSEG_PERSIST_TYPE)) {
4106 		f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
4107 			  le32_to_cpu(raw_super->cp_payload),
4108 			  blocks_per_seg - F2FS_CP_PACKS -
4109 			  NR_CURSEG_PERSIST_TYPE);
4110 		return -EFSCORRUPTED;
4111 	}
4112 
4113 	/* check reserved ino info */
4114 	if (le32_to_cpu(raw_super->node_ino) != 1 ||
4115 		le32_to_cpu(raw_super->meta_ino) != 2 ||
4116 		le32_to_cpu(raw_super->root_ino) != 3) {
4117 		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
4118 			  le32_to_cpu(raw_super->node_ino),
4119 			  le32_to_cpu(raw_super->meta_ino),
4120 			  le32_to_cpu(raw_super->root_ino));
4121 		return -EFSCORRUPTED;
4122 	}
4123 
4124 	/* check CP/SIT/NAT/SSA/MAIN_AREA boundaries */
4125 	if (sanity_check_area_boundary(sbi, folio, index))
4126 		return -EFSCORRUPTED;
4127 
4128 	return 0;
4129 }
4130 
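/*
 * Validate checkpoint fields against the super block: meta segment counts,
 * user/node block counts, current segment numbers, bitmap sizes and the
 * cp_pack layout. Returns 1 on any inconsistency, 0 otherwise.
 */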
4131 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
4132 {
4133 	unsigned int total, fsmeta;
4134 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4135 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4136 	unsigned int ovp_segments, reserved_segments;
4137 	unsigned int main_segs, blocks_per_seg;
4138 	unsigned int sit_segs, nat_segs;
4139 	unsigned int sit_bitmap_size, nat_bitmap_size;
4140 	unsigned int log_blocks_per_seg;
4141 	unsigned int segment_count_main;
4142 	unsigned int cp_pack_start_sum, cp_payload;
4143 	block_t user_block_count, valid_user_blocks;
4144 	block_t avail_node_count, valid_node_count;
4145 	unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
4146 	unsigned int sit_blk_cnt;
4147 	int i, j;
4148 
4149 	total = le32_to_cpu(raw_super->segment_count);
4150 	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
4151 	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
4152 	fsmeta += sit_segs;
4153 	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
4154 	fsmeta += nat_segs;
4155 	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
4156 	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
4157 
4158 	if (unlikely(fsmeta >= total))
4159 		return 1;
4160 
4161 	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
4162 	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
4163 
4164 	if (!f2fs_sb_has_readonly(sbi) &&
4165 			unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
4166 			ovp_segments == 0 || reserved_segments == 0)) {
4167 		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
4168 		return 1;
4169 	}
4170 	user_block_count = le64_to_cpu(ckpt->user_block_count);
4171 	segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
4172 			(f2fs_sb_has_readonly(sbi) ? 1 : 0);
4173 	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
4174 	if (!user_block_count || user_block_count >=
4175 			segment_count_main << log_blocks_per_seg) {
4176 		f2fs_err(sbi, "Wrong user_block_count: %u",
4177 			 user_block_count);
4178 		return 1;
4179 	}
4180 
4181 	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
4182 	if (valid_user_blocks > user_block_count) {
4183 		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
4184 			 valid_user_blocks, user_block_count);
4185 		return 1;
4186 	}
4187 
4188 	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
4189 	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
4190 	if (valid_node_count > avail_node_count) {
4191 		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
4192 			 valid_node_count, avail_node_count);
4193 		return 1;
4194 	}
4195 
4196 	main_segs = le32_to_cpu(raw_super->segment_count_main);
4197 	blocks_per_seg = BLKS_PER_SEG(sbi);
4198 
4199 	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
4200 		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
4201 			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
4202 			return 1;
4203 
4204 		if (f2fs_sb_has_readonly(sbi))
4205 			goto check_data;
4206 
4207 		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
4208 			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
4209 				le32_to_cpu(ckpt->cur_node_segno[j])) {
4210 				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
4211 					 i, j,
4212 					 le32_to_cpu(ckpt->cur_node_segno[i]));
4213 				return 1;
4214 			}
4215 		}
4216 	}
4217 check_data:
4218 	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
4219 		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
4220 			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
4221 			return 1;
4222 
4223 		if (f2fs_sb_has_readonly(sbi))
4224 			goto skip_cross;
4225 
4226 		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
4227 			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
4228 				le32_to_cpu(ckpt->cur_data_segno[j])) {
4229 				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
4230 					 i, j,
4231 					 le32_to_cpu(ckpt->cur_data_segno[i]));
4232 				return 1;
4233 			}
4234 		}
4235 	}
4236 	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
4237 		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
4238 			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
4239 				le32_to_cpu(ckpt->cur_data_segno[j])) {
4240 				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
4241 					 i, j,
4242 					 le32_to_cpu(ckpt->cur_node_segno[i]));
4243 				return 1;
4244 			}
4245 		}
4246 	}
4247 skip_cross:
4248 	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
4249 	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
4250 
4251 	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
4252 		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
4253 		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
4254 			 sit_bitmap_size, nat_bitmap_size);
4255 		return 1;
4256 	}
4257 
4258 	sit_blk_cnt = DIV_ROUND_UP(main_segs, SIT_ENTRY_PER_BLOCK);
4259 	if (sit_bitmap_size * 8 < sit_blk_cnt) {
4260 		f2fs_err(sbi, "Wrong bitmap size: sit: %u, sit_blk_cnt:%u",
4261 			 sit_bitmap_size, sit_blk_cnt);
4262 		return 1;
4263 	}
4264 
4265 	cp_pack_start_sum = __start_sum_addr(sbi);
4266 	cp_payload = __cp_payload(sbi);
4267 	if (cp_pack_start_sum < cp_payload + 1 ||
4268 		cp_pack_start_sum > blocks_per_seg - 1 -
4269 			NR_CURSEG_PERSIST_TYPE) {
4270 		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
4271 			 cp_pack_start_sum);
4272 		return 1;
4273 	}
4274 
4275 	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
4276 		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
4277 		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
4278 			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
4279 			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
4280 			  le32_to_cpu(ckpt->checksum_offset));
4281 		return 1;
4282 	}
4283 
4284 	nat_blocks = nat_segs << log_blocks_per_seg;
4285 	nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
4286 	nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
4287 	if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
4288 		(cp_payload + F2FS_CP_PACKS +
4289 		NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
4290 		f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
4291 			  cp_payload, nat_bits_blocks);
4292 		return 1;
4293 	}
4294 
4295 	if (unlikely(f2fs_cp_error(sbi))) {
4296 		f2fs_err(sbi, "A bug case: need to run fsck");
4297 		return 1;
4298 	}
4299 	return 0;
4300 }
4301 
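/*
 * Initialize the in-memory superblock info from the raw on-disk super
 * block and reset per-sbi counters, tunables and locks to their defaults.
 */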
4302 static void init_sb_info(struct f2fs_sb_info *sbi)
4303 {
4304 	struct f2fs_super_block *raw_super = sbi->raw_super;
4305 	int i;
4306 
4307 	sbi->log_sectors_per_block =
4308 		le32_to_cpu(raw_super->log_sectors_per_block);
4309 	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
4310 	sbi->blocksize = BIT(sbi->log_blocksize);
4311 	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
4312 	sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg);
4313 	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
4314 	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
4315 	sbi->total_sections = le32_to_cpu(raw_super->section_count);
4316 	sbi->total_node_count = SEGS_TO_BLKS(sbi,
4317 			((le32_to_cpu(raw_super->segment_count_nat) / 2) *
4318 			NAT_ENTRY_PER_BLOCK));
4319 	sbi->allocate_section_hint = le32_to_cpu(raw_super->section_count);
4320 	sbi->allocate_section_policy = ALLOCATE_FORWARD_NOHINT;
4321 	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
4322 	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
4323 	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
4324 	sbi->cur_victim_sec = NULL_SECNO;
4325 	sbi->gc_mode = GC_NORMAL;
4326 	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
4327 	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
4328 	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
4329 	sbi->migration_granularity = SEGS_PER_SEC(sbi);
4330 	sbi->migration_window_granularity = f2fs_sb_has_blkzoned(sbi) ?
4331 		DEF_MIGRATION_WINDOW_GRANULARITY_ZONED : SEGS_PER_SEC(sbi);
4332 	sbi->seq_file_ra_mul = MIN_RA_MUL;
4333 	sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
4334 	sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
4335 	spin_lock_init(&sbi->gc_remaining_trials_lock);
4336 	atomic64_set(&sbi->current_atomic_write, 0);
4337 	sbi->max_lock_elapsed_time = MAX_LOCK_ELAPSED_TIME;
4338 	sbi->adjust_lock_priority = 0;
4339 	sbi->lock_duration_priority = F2FS_DEFAULT_TASK_PRIORITY;
4340 	sbi->critical_task_priority = F2FS_CRITICAL_TASK_PRIORITY;
4341 
4342 	sbi->sum_blocksize = f2fs_sb_has_packed_ssa(sbi) ?
4343 		4096 : sbi->blocksize;
4344 	sbi->sums_per_block = sbi->blocksize / sbi->sum_blocksize;
4345 	sbi->entries_in_sum = sbi->sum_blocksize / 8;
4346 	sbi->sum_entry_size = SUMMARY_SIZE * sbi->entries_in_sum;
4347 	sbi->sum_journal_size = sbi->sum_blocksize - SUM_FOOTER_SIZE -
4348 		sbi->sum_entry_size;
4349 	sbi->nat_journal_entries = (sbi->sum_journal_size - 2) /
4350 		sizeof(struct nat_journal_entry);
4351 	sbi->sit_journal_entries = (sbi->sum_journal_size - 2) /
4352 		sizeof(struct sit_journal_entry);
4353 
4354 	sbi->dir_level = DEF_DIR_LEVEL;
4355 	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
4356 	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
4357 	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
4358 	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
4359 	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
4360 	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
4361 				DEF_UMOUNT_DISCARD_TIMEOUT;
4362 	clear_sbi_flag(sbi, SBI_NEED_FSCK);
4363 
4364 	for (i = 0; i < NR_COUNT_TYPE; i++)
4365 		atomic_set(&sbi->nr_pages[i], 0);
4366 
4367 	for (i = 0; i < META; i++)
4368 		atomic_set(&sbi->wb_sync_req[i], 0);
4369 
4370 	INIT_LIST_HEAD(&sbi->s_list);
4371 	mutex_init(&sbi->umount_mutex);
4372 	init_f2fs_rwsem(&sbi->io_order_lock);
4373 	spin_lock_init(&sbi->cp_lock);
4374 
4375 	sbi->dirty_device = 0;
4376 	spin_lock_init(&sbi->dev_lock);
4377 
4378 	init_f2fs_rwsem(&sbi->sb_lock);
4379 	init_f2fs_rwsem(&sbi->pin_sem);
4380 }
4381 
4382 static int init_percpu_info(struct f2fs_sb_info *sbi)
4383 {
4384 	int err;
4385 
4386 	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
4387 	if (err)
4388 		return err;
4389 
4390 	err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
4391 	if (err)
4392 		goto err_valid_block;
4393 
4394 	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
4395 								GFP_KERNEL);
4396 	if (err)
4397 		goto err_node_block;
4398 	return 0;
4399 
4400 err_node_block:
4401 	percpu_counter_destroy(&sbi->rf_node_block_count);
4402 err_valid_block:
4403 	percpu_counter_destroy(&sbi->alloc_valid_block_count);
4404 	return err;
4405 }
4406 
4407 #ifdef CONFIG_BLK_DEV_ZONED
4408 
4409 struct f2fs_report_zones_args {
4410 	struct f2fs_sb_info *sbi;
4411 	struct f2fs_dev_info *dev;
4412 };
4413 
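/*
 * blkdev_report_zones() callback: record each sequential zone in the
 * per-device blkz_seq bitmap and require a single usable zone capacity
 * across all sequential zones.
 */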
4414 static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
4415 			      void *data)
4416 {
4417 	struct f2fs_report_zones_args *rz_args = data;
4418 	block_t unusable_blocks = (zone->len - zone->capacity) >>
4419 					F2FS_LOG_SECTORS_PER_BLOCK;
4420 
4421 	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
4422 		return 0;
4423 
4424 	set_bit(idx, rz_args->dev->blkz_seq);
4425 	if (!rz_args->sbi->unusable_blocks_per_sec) {
4426 		rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
4427 		return 0;
4428 	}
4429 	if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
4430 		f2fs_err(rz_args->sbi, "F2FS supports only a single zone capacity");
4431 		return -EINVAL;
4432 	}
4433 	return 0;
4434 }
4435 
4436 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
4437 {
4438 	struct block_device *bdev = FDEV(devi).bdev;
4439 	sector_t nr_sectors = bdev_nr_sectors(bdev);
4440 	struct f2fs_report_zones_args rep_zone_arg;
4441 	u64 zone_sectors;
4442 	unsigned int max_open_zones;
4443 	int ret;
4444 
4445 	if (!f2fs_sb_has_blkzoned(sbi))
4446 		return 0;
4447 
4448 	if (bdev_is_zoned(FDEV(devi).bdev)) {
4449 		max_open_zones = bdev_max_open_zones(bdev);
4450 		if (max_open_zones && (max_open_zones < sbi->max_open_zones))
4451 			sbi->max_open_zones = max_open_zones;
4452 		if (sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
4453 			f2fs_err(sbi,
4454 				"zoned: max open zones %u is too small, need at least %u open zones",
4455 				sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
4456 			return -EINVAL;
4457 		}
4458 	}
4459 
4460 	zone_sectors = bdev_zone_sectors(bdev);
4461 	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
4462 				SECTOR_TO_BLOCK(zone_sectors))
4463 		return -EINVAL;
4464 	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
4465 	FDEV(devi).nr_blkz = div_u64(SECTOR_TO_BLOCK(nr_sectors),
4466 					sbi->blocks_per_blkz);
4467 	if (nr_sectors & (zone_sectors - 1))
4468 		FDEV(devi).nr_blkz++;
4469 
4470 	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
4471 					BITS_TO_LONGS(FDEV(devi).nr_blkz)
4472 					* sizeof(unsigned long),
4473 					GFP_KERNEL);
4474 	if (!FDEV(devi).blkz_seq)
4475 		return -ENOMEM;
4476 
4477 	rep_zone_arg.sbi = sbi;
4478 	rep_zone_arg.dev = &FDEV(devi);
4479 
4480 	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
4481 				  &rep_zone_arg);
4482 	if (ret < 0)
4483 		return ret;
4484 	return 0;
4485 }
4486 #endif
4487 
4488 /*
4489  * Read the f2fs raw super block.
4490  * Since we keep two copies of the super block, read both of them
4491  * to get the first valid one. If either of them is broken, we pass
4492  * the recovery flag back to the caller.
4493  */
4494 static int read_raw_super_block(struct f2fs_sb_info *sbi,
4495 			struct f2fs_super_block **raw_super,
4496 			int *valid_super_block, int *recovery)
4497 {
4498 	struct super_block *sb = sbi->sb;
4499 	int block;
4500 	struct folio *folio;
4501 	struct f2fs_super_block *super;
4502 	int err = 0;
4503 
4504 	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
4505 	if (!super)
4506 		return -ENOMEM;
4507 
4508 	for (block = 0; block < 2; block++) {
4509 		folio = read_mapping_folio(sb->s_bdev->bd_mapping, block, NULL);
4510 		if (IS_ERR(folio)) {
4511 			f2fs_err(sbi, "Unable to read %dth superblock",
4512 				 block + 1);
4513 			err = PTR_ERR(folio);
4514 			*recovery = 1;
4515 			continue;
4516 		}
4517 
4518 		/* sanity checking of raw super */
4519 		err = sanity_check_raw_super(sbi, folio, block);
4520 		if (err) {
4521 			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
4522 				 block + 1);
4523 			folio_put(folio);
4524 			*recovery = 1;
4525 			continue;
4526 		}
4527 
4528 		if (!*raw_super) {
4529 			memcpy(super, F2FS_SUPER_BLOCK(folio, block),
4530 							sizeof(*super));
4531 			*valid_super_block = block;
4532 			*raw_super = super;
4533 		}
4534 		folio_put(folio);
4535 	}
4536 
4537 	/* No valid superblock */
4538 	if (!*raw_super)
4539 		kfree(super);
4540 	else
4541 		err = 0;
4542 
4543 	return err;
4544 }
4545 
4546 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
4547 {
4548 	struct folio *folio;
4549 	pgoff_t index;
4550 	__u32 crc = 0;
4551 	int err;
4552 
4553 	if ((recover && f2fs_readonly(sbi->sb)) ||
4554 				f2fs_hw_is_readonly(sbi)) {
4555 		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
4556 		return -EROFS;
4557 	}
4558 
4559 	/* we should update superblock crc here */
4560 	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
4561 		crc = f2fs_crc32(F2FS_RAW_SUPER(sbi),
4562 				offsetof(struct f2fs_super_block, crc));
4563 		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
4564 	}
4565 
4566 	/* write back-up superblock first */
4567 	index = sbi->valid_super_block ? 0 : 1;
4568 	folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
4569 	if (IS_ERR(folio))
4570 		return PTR_ERR(folio);
4571 	err = __f2fs_commit_super(sbi, folio, index, true);
4572 	folio_put(folio);
4573 
4574 	/* if we are in recovery path, skip writing valid superblock */
4575 	if (recover || err)
4576 		return err;
4577 
4578 	/* write current valid superblock */
4579 	index = sbi->valid_super_block;
4580 	folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
4581 	if (IS_ERR(folio))
4582 		return PTR_ERR(folio);
4583 	err = __f2fs_commit_super(sbi, folio, index, true);
4584 	folio_put(folio);
4585 	return err;
4586 }
4587 
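/* Bump the per-reason stop counter, saturating at the byte maximum. */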
4588 static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason)
4589 {
4590 	unsigned long flags;
4591 
4592 	spin_lock_irqsave(&sbi->error_lock, flags);
4593 	if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
4594 		sbi->stop_reason[reason]++;
4595 	spin_unlock_irqrestore(&sbi->error_lock, flags);
4596 }
4597 
4598 static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
4599 {
4600 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4601 	unsigned long flags;
4602 	int err;
4603 
4604 	f2fs_down_write(&sbi->sb_lock);
4605 
4606 	spin_lock_irqsave(&sbi->error_lock, flags);
4607 	if (sbi->error_dirty) {
4608 		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
4609 							MAX_F2FS_ERRORS);
4610 		sbi->error_dirty = false;
4611 	}
4612 	memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON);
4613 	spin_unlock_irqrestore(&sbi->error_lock, flags);
4614 
4615 	err = f2fs_commit_super(sbi, false);
4616 
4617 	f2fs_up_write(&sbi->sb_lock);
4618 	if (err)
4619 		f2fs_err_ratelimited(sbi,
4620 			"f2fs_commit_super fails to record stop_reason, err:%d",
4621 			err);
4622 }
4623 
4624 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
4625 {
4626 	unsigned long flags;
4627 
4628 	spin_lock_irqsave(&sbi->error_lock, flags);
4629 	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
4630 		set_bit(flag, (unsigned long *)sbi->errors);
4631 		sbi->error_dirty = true;
4632 	}
4633 	spin_unlock_irqrestore(&sbi->error_lock, flags);
4634 }
4635 
4636 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
4637 {
4638 	f2fs_save_errors(sbi, error);
4639 
4640 	if (!sbi->error_dirty)
4641 		return;
4642 	if (!test_bit(error, (unsigned long *)sbi->errors))
4643 		return;
4644 	schedule_work(&sbi->s_error_work);
4645 }
4646 
4647 static bool system_going_down(void)
4648 {
4649 	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
4650 		|| system_state == SYSTEM_RESTART;
4651 }
4652 
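/*
 * Handle an unrecoverable error: set CP_ERROR_FLAG, record the stop reason
 * asynchronously, panic if errors=panic was requested, and otherwise either
 * keep going (errors=continue or shutdown) or announce a read-only state.
 */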
4653 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason)
4654 {
4655 	struct super_block *sb = sbi->sb;
4656 	bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
4657 	bool continue_fs = !shutdown &&
4658 			F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE;
4659 
4660 	set_ckpt_flags(sbi, CP_ERROR_FLAG);
4661 
4662 	if (!f2fs_hw_is_readonly(sbi)) {
4663 		save_stop_reason(sbi, reason);
4664 
4665 		/*
4666 		 * always create an asynchronous task to record stop_reason
4667 		 * in order to avoid potential deadlock when running into
4668 		 * f2fs_record_stop_reason() synchronously.
4669 		 */
4670 		schedule_work(&sbi->s_error_work);
4671 	}
4672 
4673 	/*
4674 	 * We force ERRORS_RO behavior when the system is rebooting. Otherwise we
4675 	 * could panic during 'reboot -f' as the underlying device is already
4676 	 * disabled.
4677 	 */
4678 	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC &&
4679 				!shutdown && !system_going_down() &&
4680 				!is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))
4681 		panic("F2FS-fs (device %s): panic forced after error\n",
4682 							sb->s_id);
4683 
4684 	if (shutdown)
4685 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
4686 	else
4687 		dump_stack();
4688 
4689 	/*
4690 	 * Continue filesystem operation if errors=continue. Do not set
4691 	 * RO on shutdown, since RO bypasses thaw_super which can hang the
4692 	 * system.
4693 	 */
4694 	if (continue_fs || f2fs_readonly(sb) || shutdown) {
4695 		f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason);
4696 		return;
4697 	}
4698 
4699 	f2fs_warn(sbi, "Remounting filesystem read-only");
4700 
4701 	/*
4702 	 * We have already set the CP_ERROR_FLAG flag to stop all updates
4703 	 * to the filesystem, so there is no need to set SB_RDONLY here:
4704 	 * that flag should only be set under the sb->s_umount semaphore
4705 	 * via the remount procedure; otherwise it would confuse code like
4706 	 * freeze_super(), leading to deadlocks and other problems.
4707 	 */
4708 }
4709 
4710 static void f2fs_record_error_work(struct work_struct *work)
4711 {
4712 	struct f2fs_sb_info *sbi = container_of(work,
4713 					struct f2fs_sb_info, s_error_work);
4714 
4715 	f2fs_record_stop_reason(sbi);
4716 }
4717 
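/*
 * Return the segment number of the first sequential zone on any zoned
 * device, or NULL_SEGNO when the zoned feature is off or no such zone
 * exists.
 */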
4718 static inline unsigned int get_first_seq_zone_segno(struct f2fs_sb_info *sbi)
4719 {
4720 #ifdef CONFIG_BLK_DEV_ZONED
4721 	unsigned int zoneno, total_zones;
4722 	int devi;
4723 
4724 	if (!f2fs_sb_has_blkzoned(sbi))
4725 		return NULL_SEGNO;
4726 
4727 	for (devi = 0; devi < sbi->s_ndevs; devi++) {
4728 		if (!bdev_is_zoned(FDEV(devi).bdev))
4729 			continue;
4730 
4731 		total_zones = GET_ZONE_FROM_SEG(sbi, FDEV(devi).total_segments);
4732 
4733 		for (zoneno = 0; zoneno < total_zones; zoneno++) {
4734 			unsigned int segs, blks;
4735 
4736 			if (!f2fs_zone_is_seq(sbi, devi, zoneno))
4737 				continue;
4738 
4739 			segs = GET_SEG_FROM_SEC(sbi,
4740 					zoneno * sbi->secs_per_zone);
4741 			blks = SEGS_TO_BLKS(sbi, segs);
4742 			return GET_SEGNO(sbi, FDEV(devi).start_blk + blks);
4743 		}
4744 	}
4745 #endif
4746 	return NULL_SEGNO;
4747 }
4748 
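/*
 * Build sbi->devs from the device table in the super block (or from a
 * single zoned block device), opening secondary devices and recording each
 * device's segment range and zone information.
 */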
4749 static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
4750 {
4751 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4752 	unsigned int max_devices = MAX_DEVICES;
4753 	unsigned int logical_blksize;
4754 	blk_mode_t mode = sb_open_mode(sbi->sb->s_flags);
4755 	int i;
4756 
4757 	/* Initialize single device information */
4758 	if (!RDEV(0).path[0]) {
4759 		if (!bdev_is_zoned(sbi->sb->s_bdev))
4760 			return 0;
4761 		max_devices = 1;
4762 	}
4763 
4764 	/*
4765 	 * Initialize information for multiple devices, or for a single
4766 	 * zoned block device.
4767 	 */
4768 	sbi->devs = f2fs_kzalloc(sbi,
4769 				 array_size(max_devices,
4770 					    sizeof(struct f2fs_dev_info)),
4771 				 GFP_KERNEL);
4772 	if (!sbi->devs)
4773 		return -ENOMEM;
4774 
4775 	logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
4776 	sbi->aligned_blksize = true;
4777 	sbi->bggc_io_aware = AWARE_ALL_IO;
4778 #ifdef CONFIG_BLK_DEV_ZONED
4779 	sbi->max_open_zones = UINT_MAX;
4780 	sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
4781 	sbi->bggc_io_aware = AWARE_READ_IO;
4782 #endif
4783 
4784 	for (i = 0; i < max_devices; i++) {
4785 		if (max_devices == 1) {
4786 			FDEV(i).total_segments =
4787 				le32_to_cpu(raw_super->segment_count_main);
4788 			FDEV(i).start_blk = 0;
4789 			FDEV(i).end_blk = FDEV(i).total_segments *
4790 						BLKS_PER_SEG(sbi);
4791 		}
4792 
4793 		if (i == 0)
4794 			FDEV(0).bdev_file = sbi->sb->s_bdev_file;
4795 		else if (!RDEV(i).path[0])
4796 			break;
4797 
4798 		if (max_devices > 1) {
4799 			/* Multi-device mount */
4800 			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
4801 			FDEV(i).total_segments =
4802 				le32_to_cpu(RDEV(i).total_segments);
4803 			if (i == 0) {
4804 				FDEV(i).start_blk = 0;
4805 				FDEV(i).end_blk = FDEV(i).start_blk +
4806 					SEGS_TO_BLKS(sbi,
4807 					FDEV(i).total_segments) - 1 +
4808 					le32_to_cpu(raw_super->segment0_blkaddr);
4809 				sbi->allocate_section_hint = FDEV(i).total_segments /
4810 							SEGS_PER_SEC(sbi);
4811 			} else {
4812 				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
4813 				FDEV(i).end_blk = FDEV(i).start_blk +
4814 						SEGS_TO_BLKS(sbi,
4815 						FDEV(i).total_segments) - 1;
4816 				FDEV(i).bdev_file = bdev_file_open_by_path(
4817 					FDEV(i).path, mode, sbi->sb, NULL);
4818 			}
4819 		}
4820 		if (IS_ERR(FDEV(i).bdev_file))
4821 			return PTR_ERR(FDEV(i).bdev_file);
4822 
4823 		FDEV(i).bdev = file_bdev(FDEV(i).bdev_file);
4824 		/* to release errored devices */
4825 		sbi->s_ndevs = i + 1;
4826 
4827 		if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
4828 			sbi->aligned_blksize = false;
4829 
4830 #ifdef CONFIG_BLK_DEV_ZONED
4831 		if (bdev_is_zoned(FDEV(i).bdev)) {
4832 			if (!f2fs_sb_has_blkzoned(sbi)) {
4833 				f2fs_err(sbi, "Zoned block device feature not enabled");
4834 				return -EINVAL;
4835 			}
4836 			if (init_blkz_info(sbi, i)) {
4837 				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
4838 				return -EINVAL;
4839 			}
4840 			if (max_devices == 1)
4841 				break;
4842 			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: Host-managed)",
4843 				  i, FDEV(i).path,
4844 				  FDEV(i).total_segments,
4845 				  FDEV(i).start_blk, FDEV(i).end_blk);
4846 			continue;
4847 		}
4848 #endif
4849 		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
4850 			  i, FDEV(i).path,
4851 			  FDEV(i).total_segments,
4852 			  FDEV(i).start_blk, FDEV(i).end_blk);
4853 	}
4854 	return 0;
4855 }
4856 
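/*
 * When the casefold feature is enabled, load the unicode encoding named in
 * the super block; fail the mount if the encoding is unknown or the kernel
 * lacks CONFIG_UNICODE.
 */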
4857 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
4858 {
4859 #if IS_ENABLED(CONFIG_UNICODE)
4860 	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
4861 		const struct f2fs_sb_encodings *encoding_info;
4862 		struct unicode_map *encoding;
4863 		__u16 encoding_flags;
4864 
4865 		encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
4866 		if (!encoding_info) {
4867 			f2fs_err(sbi,
4868 				 "Encoding requested by superblock is unknown");
4869 			return -EINVAL;
4870 		}
4871 
4872 		encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
4873 		encoding = utf8_load(encoding_info->version);
4874 		if (IS_ERR(encoding)) {
4875 			f2fs_err(sbi,
4876 				 "can't mount with superblock charset: %s-%u.%u.%u "
4877 				 "not supported by the kernel. flags: 0x%x.",
4878 				 encoding_info->name,
4879 				 unicode_major(encoding_info->version),
4880 				 unicode_minor(encoding_info->version),
4881 				 unicode_rev(encoding_info->version),
4882 				 encoding_flags);
4883 			return PTR_ERR(encoding);
4884 		}
4885 		f2fs_info(sbi, "Using encoding defined by superblock: "
4886 			 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
4887 			 unicode_major(encoding_info->version),
4888 			 unicode_minor(encoding_info->version),
4889 			 unicode_rev(encoding_info->version),
4890 			 encoding_flags);
4891 
4892 		sbi->sb->s_encoding = encoding;
4893 		sbi->sb->s_encoding_flags = encoding_flags;
4894 	}
4895 #else
4896 	if (f2fs_sb_has_casefold(sbi)) {
4897 		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
4898 		return -EINVAL;
4899 	}
4900 #endif
4901 	return 0;
4902 }
4903 
4904 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
4905 {
4906 	/* adjust parameters according to the volume size */
4907 	if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) {
4908 		if (f2fs_block_unit_discard(sbi))
4909 			SM_I(sbi)->dcc_info->discard_granularity =
4910 						MIN_DISCARD_GRANULARITY;
4911 		if (!f2fs_lfs_mode(sbi))
4912 			SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
4913 						BIT(F2FS_IPU_HONOR_OPU_WRITE);
4914 	}
4915 
4916 	sbi->readdir_ra = true;
4917 }
4918 
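/*
 * Fill the VFS super block at mount time: read and validate the on-disk
 * super block and checkpoint, set up devices, caches and the segment/node
 * managers, recover orphans and fsynced data, then expose the root inode.
 */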
4919 static int f2fs_fill_super(struct super_block *sb, struct fs_context *fc)
4920 {
4921 	struct f2fs_fs_context *ctx = fc->fs_private;
4922 	struct f2fs_sb_info *sbi;
4923 	struct f2fs_super_block *raw_super;
4924 	struct inode *root;
4925 	int err;
4926 	bool skip_recovery = false, need_fsck = false;
4927 	int recovery, i, valid_super_block;
4928 	struct curseg_info *seg_i;
4929 	int retry_cnt = 1;
4930 #ifdef CONFIG_QUOTA
4931 	bool quota_enabled = false;
4932 #endif
4933 
4934 try_onemore:
4935 	err = -EINVAL;
4936 	raw_super = NULL;
4937 	valid_super_block = -1;
4938 	recovery = 0;
4939 
4940 	/* allocate memory for f2fs-specific super block info */
4941 	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
4942 	if (!sbi)
4943 		return -ENOMEM;
4944 
4945 	sbi->sb = sb;
4946 
4947 	/* initialize locks within allocated memory */
4948 	init_f2fs_rwsem_trace(&sbi->gc_lock, sbi, LOCK_NAME_GC_LOCK);
4949 	mutex_init(&sbi->writepages);
4950 	init_f2fs_rwsem_trace(&sbi->cp_global_sem, sbi, LOCK_NAME_CP_GLOBAL);
4951 	init_f2fs_rwsem_trace(&sbi->node_write, sbi, LOCK_NAME_NODE_WRITE);
4952 	init_f2fs_rwsem_trace(&sbi->node_change, sbi, LOCK_NAME_NODE_CHANGE);
4953 	spin_lock_init(&sbi->stat_lock);
4954 	init_f2fs_rwsem_trace(&sbi->cp_rwsem, sbi, LOCK_NAME_CP_RWSEM);
4955 	init_f2fs_rwsem(&sbi->quota_sem);
4956 	init_waitqueue_head(&sbi->cp_wait);
4957 	spin_lock_init(&sbi->error_lock);
4958 
4959 	for (i = 0; i < NR_INODE_TYPE; i++) {
4960 		INIT_LIST_HEAD(&sbi->inode_list[i]);
4961 		spin_lock_init(&sbi->inode_lock[i]);
4962 	}
4963 	mutex_init(&sbi->flush_lock);
4964 
4965 	/* set a block size */
4966 	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
4967 		f2fs_err(sbi, "unable to set blocksize");
4968 		goto free_sbi;
4969 	}
4970 
4971 	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
4972 								&recovery);
4973 	if (err)
4974 		goto free_sbi;
4975 
4976 	sb->s_fs_info = sbi;
4977 	sbi->raw_super = raw_super;
4978 
4979 	INIT_WORK(&sbi->s_error_work, f2fs_record_error_work);
4980 	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
4981 	memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON);
4982 
4983 	/* precompute checksum seed for metadata */
4984 	if (f2fs_sb_has_inode_chksum(sbi))
4985 		sbi->s_chksum_seed = f2fs_chksum(~0, raw_super->uuid,
4986 						 sizeof(raw_super->uuid));
4987 
4988 	default_options(sbi, false);
4989 
4990 	err = f2fs_check_opt_consistency(fc, sb);
4991 	if (err)
4992 		goto free_sb_buf;
4993 
4994 	f2fs_apply_options(fc, sb);
4995 
4996 	err = f2fs_sanity_check_options(sbi, false);
4997 	if (err)
4998 		goto free_options;
4999 
5000 	sb->s_maxbytes = max_file_blocks(NULL) <<
5001 				le32_to_cpu(raw_super->log_blocksize);
5002 	sb->s_max_links = F2FS_LINK_MAX;
5003 
5004 	err = f2fs_setup_casefold(sbi);
5005 	if (err)
5006 		goto free_options;
5007 
5008 #ifdef CONFIG_QUOTA
5009 	sb->dq_op = &f2fs_quota_operations;
5010 	sb->s_qcop = &f2fs_quotactl_ops;
5011 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
5012 
5013 	if (f2fs_sb_has_quota_ino(sbi)) {
5014 		for (i = 0; i < MAXQUOTAS; i++) {
5015 			if (f2fs_qf_ino(sbi->sb, i))
5016 				sbi->nquota_files++;
5017 		}
5018 	}
5019 #endif
5020 
5021 	sb->s_op = &f2fs_sops;
5022 #ifdef CONFIG_FS_ENCRYPTION
5023 	sb->s_cop = &f2fs_cryptops;
5024 #endif
5025 #ifdef CONFIG_FS_VERITY
5026 	sb->s_vop = &f2fs_verityops;
5027 #endif
5028 	sb->s_xattr = f2fs_xattr_handlers;
5029 	sb->s_export_op = &f2fs_export_ops;
5030 	sb->s_magic = F2FS_SUPER_MAGIC;
5031 	sb->s_time_gran = 1;
5032 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
5033 		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
5034 	if (test_opt(sbi, INLINECRYPT))
5035 		sb->s_flags |= SB_INLINECRYPT;
5036 
5037 	if (test_opt(sbi, LAZYTIME))
5038 		sb->s_flags |= SB_LAZYTIME;
5039 	else
5040 		sb->s_flags &= ~SB_LAZYTIME;
5041 
5042 	super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid));
5043 	super_set_sysfs_name_bdev(sb);
5044 	sb->s_iflags |= SB_I_CGROUPWB;
5045 
5046 	/* init f2fs-specific super block info */
5047 	sbi->valid_super_block = valid_super_block;
5048 
5049 	/* disallow all the data/node/meta page writes */
5050 	set_sbi_flag(sbi, SBI_POR_DOING);
5051 
5052 	err = f2fs_init_write_merge_io(sbi);
5053 	if (err)
5054 		goto free_bio_info;
5055 
5056 	init_sb_info(sbi);
5057 
5058 	err = f2fs_init_iostat(sbi);
5059 	if (err)
5060 		goto free_bio_info;
5061 
5062 	err = init_percpu_info(sbi);
5063 	if (err)
5064 		goto free_iostat;
5065 
5066 	err = f2fs_init_page_array_cache(sbi);
5067 	if (err)
5068 		goto free_percpu;
5069 
5070 	/* get an inode for meta space */
5071 	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
5072 	if (IS_ERR(sbi->meta_inode)) {
5073 		f2fs_err(sbi, "Failed to read F2FS meta data inode");
5074 		err = PTR_ERR(sbi->meta_inode);
5075 		goto free_page_array_cache;
5076 	}
5077 
5078 	err = f2fs_get_valid_checkpoint(sbi);
5079 	if (err) {
5080 		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
5081 		goto free_meta_inode;
5082 	}
5083 
5084 	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
5085 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
5086 	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
5087 		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
5088 		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
5089 	}
5090 
5091 	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
5092 		set_sbi_flag(sbi, SBI_NEED_FSCK);
5093 
5094 	/* Initialize device list */
5095 	err = f2fs_scan_devices(sbi);
5096 	if (err) {
5097 		f2fs_err(sbi, "Failed to find devices");
5098 		goto free_devices;
5099 	}
5100 
5101 	err = f2fs_init_post_read_wq(sbi);
5102 	if (err) {
5103 		f2fs_err(sbi, "Failed to initialize post read workqueue");
5104 		goto free_devices;
5105 	}
5106 
5107 	sbi->total_valid_node_count =
5108 				le32_to_cpu(sbi->ckpt->valid_node_count);
5109 	percpu_counter_set(&sbi->total_valid_inode_count,
5110 				le32_to_cpu(sbi->ckpt->valid_inode_count));
5111 	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
5112 	sbi->total_valid_block_count =
5113 				le64_to_cpu(sbi->ckpt->valid_block_count);
5114 	sbi->last_valid_block_count = sbi->total_valid_block_count;
5115 	sbi->reserved_blocks = 0;
5116 	sbi->current_reserved_blocks = 0;
5117 	limit_reserve_root(sbi);
5118 	adjust_unusable_cap_perc(sbi);
5119 
5120 	f2fs_init_extent_cache_info(sbi);
5121 
5122 	f2fs_init_ino_entry_info(sbi);
5123 
5124 	f2fs_init_fsync_node_info(sbi);
5125 
5126 	/* setup checkpoint request control and start checkpoint issue thread */
5127 	f2fs_init_ckpt_req_control(sbi);
5128 	if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
5129 			test_opt(sbi, MERGE_CHECKPOINT)) {
5130 		err = f2fs_start_ckpt_thread(sbi);
5131 		if (err) {
5132 			f2fs_err(sbi,
5133 			    "Failed to start F2FS issue_checkpoint_thread (%d)",
5134 			    err);
5135 			goto stop_ckpt_thread;
5136 		}
5137 	}
5138 
5139 	/* setup f2fs internal modules */
5140 	err = f2fs_build_segment_manager(sbi);
5141 	if (err) {
5142 		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
5143 			 err);
5144 		goto free_sm;
5145 	}
5146 	err = f2fs_build_node_manager(sbi);
5147 	if (err) {
5148 		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
5149 			 err);
5150 		goto free_nm;
5151 	}
5152 
5153 	/* For write statistics */
5154 	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
5155 
5156 	/* get segno of first zoned block device */
5157 	sbi->first_seq_zone_segno = get_first_seq_zone_segno(sbi);
5158 
5159 	sbi->reserved_pin_section = f2fs_sb_has_blkzoned(sbi) ?
5160 			ZONED_PIN_SEC_REQUIRED_COUNT :
5161 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi));
5162 
5163 	/* Read accumulated write IO statistics if exists */
5164 	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
5165 	if (__exist_node_summaries(sbi))
5166 		sbi->kbytes_written =
5167 			le64_to_cpu(seg_i->journal->info.kbytes_written);
5168 
5169 	f2fs_build_gc_manager(sbi);
5170 
5171 	err = f2fs_build_stats(sbi);
5172 	if (err)
5173 		goto free_nm;
5174 
5175 	/* get an inode for node space */
5176 	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
5177 	if (IS_ERR(sbi->node_inode)) {
5178 		f2fs_err(sbi, "Failed to read node inode");
5179 		err = PTR_ERR(sbi->node_inode);
5180 		goto free_stats;
5181 	}
5182 
5183 	/* read root inode and dentry */
5184 	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
5185 	if (IS_ERR(root)) {
5186 		f2fs_err(sbi, "Failed to read root inode");
5187 		err = PTR_ERR(root);
5188 		goto free_node_inode;
5189 	}
5190 	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
5191 			!root->i_size || !root->i_nlink) {
5192 		iput(root);
5193 		err = -EINVAL;
5194 		goto free_node_inode;
5195 	}
5196 
5197 	generic_set_sb_d_ops(sb);
5198 	sb->s_root = d_make_root(root); /* allocate root dentry */
5199 	if (!sb->s_root) {
5200 		err = -ENOMEM;
5201 		goto free_node_inode;
5202 	}
5203 
5204 	err = f2fs_init_compress_inode(sbi);
5205 	if (err)
5206 		goto free_root_inode;
5207 
5208 	err = f2fs_register_sysfs(sbi);
5209 	if (err)
5210 		goto free_compress_inode;
5211 
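	/* record the mounting task as the umount lock holder until mount completes */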
5212 	sbi->umount_lock_holder = current;
5213 #ifdef CONFIG_QUOTA
5214 	/* Enable quota usage during mount */
5215 	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
5216 		err = f2fs_enable_quotas(sb);
5217 		if (err)
5218 			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
5219 	}
5220 
5221 	quota_enabled = f2fs_recover_quota_begin(sbi);
5222 #endif
5223 	/* if there are any orphan inodes, free them */
5224 	err = f2fs_recover_orphan_inodes(sbi);
5225 	if (err)
5226 		goto free_meta;
5227 
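	/* skip roll-forward recovery if the checkpoint is marked as disabled */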
5228 	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) {
5229 		skip_recovery = true;
5230 		goto reset_checkpoint;
5231 	}
5232 
5233 	/* recover fsynced data */
5234 	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
5235 			!test_opt(sbi, NORECOVERY)) {
5236 		/*
5237 		 * The mount should fail when the device is read-only and the
5238 		 * previous checkpoint was not written by a clean system shutdown.
5239 		 */
5240 		if (f2fs_hw_is_readonly(sbi)) {
5241 			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
5242 				err = f2fs_recover_fsync_data(sbi, true);
5243 				if (err > 0) {
5244 					err = -EROFS;
5245 					f2fs_err(sbi, "Need to recover fsync data, but "
5246 						"write access unavailable, please try "
5247 						"mount w/ disable_roll_forward or norecovery");
5248 				}
5249 				if (err < 0)
5250 					goto free_meta;
5251 			}
5252 			f2fs_info(sbi, "write access unavailable, skipping recovery");
5253 			goto reset_checkpoint;
5254 		}
5255 
5256 		if (need_fsck)
5257 			set_sbi_flag(sbi, SBI_NEED_FSCK);
5258 
5259 		if (skip_recovery)
5260 			goto reset_checkpoint;
5261 
5262 		err = f2fs_recover_fsync_data(sbi, false);
5263 		if (err < 0) {
5264 			if (err != -ENOMEM)
5265 				skip_recovery = true;
5266 			need_fsck = true;
5267 			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
5268 				 err);
5269 			goto free_meta;
5270 		}
5271 	} else {
5272 		err = f2fs_recover_fsync_data(sbi, true);
5273 		if (err > 0) {
5274 			if (!f2fs_readonly(sb)) {
5275 				f2fs_err(sbi, "Need to recover fsync data");
5276 				err = -EINVAL;
5277 				goto free_meta;
5278 			} else {
5279 				f2fs_info(sbi, "drop all fsynced data");
5280 				err = 0;
5281 			}
5282 		}
5283 	}
5284 
5285 reset_checkpoint:
5286 #ifdef CONFIG_QUOTA
5287 	f2fs_recover_quota_end(sbi, quota_enabled);
5288 #endif
5289 	/*
5290 	 * If f2fs is not read-only and fsync data recovery succeeds, the
5291 	 * write pointer consistency of cursegs and other zones has already
5292 	 * been checked and fixed during recovery. However, if recovery fails,
5293 	 * write pointers are left untouched, and a retry-mount should check
5294 	 * them here.
5295 	 */
5296 	if (skip_recovery)
5297 		err = f2fs_check_and_fix_write_pointer(sbi);
5298 	if (err)
5299 		goto free_meta;
5300 
5301 	/* f2fs_recover_fsync_data() cleared this already */
5302 	clear_sbi_flag(sbi, SBI_POR_DOING);
5303 
5304 	err = f2fs_init_inmem_curseg(sbi);
5305 	if (err)
5306 		goto sync_free_meta;
5307 
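	/* apply checkpoint=disable, or re-enable checkpointing that was previously disabled */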
5308 	if (test_opt(sbi, DISABLE_CHECKPOINT))
5309 		err = f2fs_disable_checkpoint(sbi);
5310 	else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))
5311 		err = f2fs_enable_checkpoint(sbi);
5312 	if (err)
5313 		goto sync_free_meta;
5314 
5315 	/*
5316 	 * If the filesystem is not mounted read-only,
5317 	 * start the gc_thread.
5318 	 */
5319 	if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
5320 		test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
5321 		/* After POR, we can run the background GC thread. */
5322 		err = f2fs_start_gc_thread(sbi);
5323 		if (err)
5324 			goto sync_free_meta;
5325 	}
5326 
5327 	/* recover broken superblock */
5328 	if (recovery) {
5329 		err = f2fs_commit_super(sbi, true);
5330 		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
5331 			  sbi->valid_super_block ? 1 : 2, err);
5332 	}
5333 
5334 	f2fs_join_shrinker(sbi);
5335 
5336 	f2fs_tuning_parameters(sbi);
5337 
5338 	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
5339 		    cur_cp_version(F2FS_CKPT(sbi)));
5340 	f2fs_update_time(sbi, CP_TIME);
5341 	f2fs_update_time(sbi, REQ_TIME);
5342 	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
5343 
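	/* mount is complete; clear the umount lock holder */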
5344 	sbi->umount_lock_holder = NULL;
5345 	return 0;
5346 
5347 sync_free_meta:
5348 	/* safe to flush all the data */
5349 	sync_filesystem(sbi->sb);
5350 	retry_cnt = 0;
5351 
5352 free_meta:
5353 #ifdef CONFIG_QUOTA
5354 	f2fs_truncate_quota_inode_pages(sb);
5355 	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
5356 		f2fs_quota_off_umount(sbi->sb);
5357 #endif
5358 	/*
5359 	 * Some dirty meta pages can be left behind when f2fs_recover_orphan_inodes()
5360 	 * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
5361 	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
5362 	 * falls into an infinite loop in f2fs_sync_meta_pages().
5363 	 */
5364 	truncate_inode_pages_final(META_MAPPING(sbi));
5365 	/* evict some inodes being cached by GC */
5366 	evict_inodes(sb);
5367 	f2fs_unregister_sysfs(sbi);
5368 free_compress_inode:
5369 	f2fs_destroy_compress_inode(sbi);
5370 free_root_inode:
5371 	dput(sb->s_root);
5372 	sb->s_root = NULL;
5373 free_node_inode:
5374 	f2fs_release_ino_entry(sbi, true);
5375 	truncate_inode_pages_final(NODE_MAPPING(sbi));
5376 	iput(sbi->node_inode);
5377 	sbi->node_inode = NULL;
5378 free_stats:
5379 	f2fs_destroy_stats(sbi);
5380 free_nm:
5381 	/* stop the discard thread before destroying the node manager */
5382 	f2fs_stop_discard_thread(sbi);
5383 	f2fs_destroy_node_manager(sbi);
5384 free_sm:
5385 	f2fs_destroy_segment_manager(sbi);
5386 stop_ckpt_thread:
5387 	f2fs_stop_ckpt_thread(sbi);
5388 	/* flush s_error_work before destroying sbi */
5389 	flush_work(&sbi->s_error_work);
5390 	f2fs_destroy_post_read_wq(sbi);
5391 free_devices:
5392 	destroy_device_list(sbi);
5393 	kvfree(sbi->ckpt);
5394 free_meta_inode:
5395 	make_bad_inode(sbi->meta_inode);
5396 	iput(sbi->meta_inode);
5397 	sbi->meta_inode = NULL;
5398 free_page_array_cache:
5399 	f2fs_destroy_page_array_cache(sbi);
5400 free_percpu:
5401 	destroy_percpu_info(sbi);
5402 free_iostat:
5403 	f2fs_destroy_iostat(sbi);
5404 free_bio_info:
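	/* free the per-page-type write bio information */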
5405 	for (i = 0; i < NR_PAGE_TYPE; i++)
5406 		kfree(sbi->write_io[i]);
5407 
5408 #if IS_ENABLED(CONFIG_UNICODE)
5409 	utf8_unload(sb->s_encoding);
5410 	sb->s_encoding = NULL;
5411 #endif
5412 free_options:
5413 #ifdef CONFIG_QUOTA
5414 	for (i = 0; i < MAXQUOTAS; i++)
5415 		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
5416 #endif
5417 	/* no need to free dummy_enc_policy; on failure it is kept in ctx and freed by f2fs_fc_free() */
5418 	swap(F2FS_CTX_INFO(ctx).dummy_enc_policy, F2FS_OPTION(sbi).dummy_enc_policy);
5419 free_sb_buf:
5420 	kfree(raw_super);
5421 free_sbi:
5422 	kfree(sbi);
5423 	sb->s_fs_info = NULL;
5424 
5425 	/* give only one more chance */
5426 	if (retry_cnt > 0 && skip_recovery) {
5427 		retry_cnt--;
5428 		shrink_dcache_sb(sb);
5429 		goto try_onemore;
5430 	}
5431 	return err;
5432 }
5433 
5434 static int f2fs_get_tree(struct fs_context *fc)
5435 {
5436 	return get_tree_bdev(fc, f2fs_fill_super);
5437 }
5438 
5439 static int f2fs_reconfigure(struct fs_context *fc)
5440 {
5441 	struct super_block *sb = fc->root->d_sb;
5442 
5443 	return __f2fs_remount(fc, sb);
5444 }
5445 
5446 static void f2fs_fc_free(struct fs_context *fc)
5447 {
5448 	struct f2fs_fs_context *ctx = fc->fs_private;
5449 
5450 	if (!ctx)
5451 		return;
5452 
5453 #ifdef CONFIG_QUOTA
5454 	f2fs_unnote_qf_name_all(fc);
5455 #endif
5456 	fscrypt_free_dummy_policy(&F2FS_CTX_INFO(ctx).dummy_enc_policy);
5457 	kfree(ctx);
5458 }
5459 
5460 static const struct fs_context_operations f2fs_context_ops = {
5461 	.parse_param	= f2fs_parse_param,
5462 	.get_tree	= f2fs_get_tree,
5463 	.reconfigure	= f2fs_reconfigure,
5464 	.free		= f2fs_fc_free,
5465 };
5466 
5467 static void kill_f2fs_super(struct super_block *sb)
5468 {
5469 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
5470 
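	/* a root dentry exists only if the filesystem was fully mounted */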
5471 	if (sb->s_root) {
5472 		sbi->umount_lock_holder = current;
5473 
5474 		set_sbi_flag(sbi, SBI_IS_CLOSE);
5475 		f2fs_stop_gc_thread(sbi);
5476 		f2fs_stop_discard_thread(sbi);
5477 
5478 #ifdef CONFIG_F2FS_FS_COMPRESSION
5479 		/*
5480 		 * Invalidate the compress inode cache here so that the later
5481 		 * evict_inode() can bypass checking and invalidating it.
5482 		 */
5483 		if (test_opt(sbi, COMPRESS_CACHE))
5484 			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
5485 #endif
5486 
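		/*
		 * Write a final checkpoint if the filesystem is dirty or the
		 * last checkpoint was not written at umount time.
		 */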
5487 		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
5488 				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
5489 			struct cp_control cpc = {
5490 				.reason = CP_UMOUNT,
5491 			};
5492 			stat_inc_cp_call_count(sbi, TOTAL_CALL);
5493 			f2fs_write_checkpoint(sbi, &cpc);
5494 		}
5495 
5496 		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
5497 			sb->s_flags &= ~SB_RDONLY;
5498 	}
5499 	kill_block_super(sb);
5500 	/* Release block devices last, after fscrypt_destroy_keyring(). */
5501 	if (sbi) {
5502 		destroy_device_list(sbi);
5503 		kfree(sbi);
5504 		sb->s_fs_info = NULL;
5505 	}
5506 }
5507 
5508 static int f2fs_init_fs_context(struct fs_context *fc)
5509 {
5510 	struct f2fs_fs_context *ctx;
5511 
5512 	ctx = kzalloc(sizeof(struct f2fs_fs_context), GFP_KERNEL);
5513 	if (!ctx)
5514 		return -ENOMEM;
5515 
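	/* hand the private context and operations to the VFS fs_context */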
5516 	fc->fs_private = ctx;
5517 	fc->ops = &f2fs_context_ops;
5518 
5519 	return 0;
5520 }
5521 
5522 static struct file_system_type f2fs_fs_type = {
5523 	.owner		= THIS_MODULE,
5524 	.name		= "f2fs",
5525 	.init_fs_context = f2fs_init_fs_context,
5526 	.kill_sb	= kill_f2fs_super,
5527 	.fs_flags	= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
5528 };
5529 MODULE_ALIAS_FS("f2fs");
5530 
5531 static int __init init_inodecache(void)
5532 {
5533 	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
5534 			sizeof(struct f2fs_inode_info), 0,
5535 			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
5536 	return f2fs_inode_cachep ? 0 : -ENOMEM;
5537 }
5538 
5539 static void destroy_inodecache(void)
5540 {
5541 	/*
5542 	 * Make sure all delayed RCU-freed inodes are flushed before we
5543 	 * destroy the cache.
5544 	 */
5545 	rcu_barrier();
5546 	kmem_cache_destroy(f2fs_inode_cachep);
5547 }
5548 
5549 static int __init init_f2fs_fs(void)
5550 {
5551 	int err;
5552 
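	/* create global caches and helpers; unwind in reverse order on failure */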
5553 	err = init_inodecache();
5554 	if (err)
5555 		goto fail;
5556 	err = f2fs_create_node_manager_caches();
5557 	if (err)
5558 		goto free_inodecache;
5559 	err = f2fs_create_segment_manager_caches();
5560 	if (err)
5561 		goto free_node_manager_caches;
5562 	err = f2fs_create_checkpoint_caches();
5563 	if (err)
5564 		goto free_segment_manager_caches;
5565 	err = f2fs_create_recovery_cache();
5566 	if (err)
5567 		goto free_checkpoint_caches;
5568 	err = f2fs_create_extent_cache();
5569 	if (err)
5570 		goto free_recovery_cache;
5571 	err = f2fs_create_garbage_collection_cache();
5572 	if (err)
5573 		goto free_extent_cache;
5574 	err = f2fs_init_sysfs();
5575 	if (err)
5576 		goto free_garbage_collection_cache;
5577 	err = f2fs_init_shrinker();
5578 	if (err)
5579 		goto free_sysfs;
5580 	f2fs_create_root_stats();
5581 	err = f2fs_init_post_read_processing();
5582 	if (err)
5583 		goto free_root_stats;
5584 	err = f2fs_init_iostat_processing();
5585 	if (err)
5586 		goto free_post_read;
5587 	err = f2fs_init_bio_entry_cache();
5588 	if (err)
5589 		goto free_iostat;
5590 	err = f2fs_init_bioset();
5591 	if (err)
5592 		goto free_bio_entry_cache;
5593 	err = f2fs_init_compress_mempool();
5594 	if (err)
5595 		goto free_bioset;
5596 	err = f2fs_init_compress_cache();
5597 	if (err)
5598 		goto free_compress_mempool;
5599 	err = f2fs_create_casefold_cache();
5600 	if (err)
5601 		goto free_compress_cache;
5602 	err = f2fs_init_xattr_cache();
5603 	if (err)
5604 		goto free_casefold_cache;
5605 	err = register_filesystem(&f2fs_fs_type);
5606 	if (err)
5607 		goto free_xattr_cache;
5608 	return 0;
5609 free_xattr_cache:
5610 	f2fs_destroy_xattr_cache();
5611 free_casefold_cache:
5612 	f2fs_destroy_casefold_cache();
5613 free_compress_cache:
5614 	f2fs_destroy_compress_cache();
5615 free_compress_mempool:
5616 	f2fs_destroy_compress_mempool();
5617 free_bioset:
5618 	f2fs_destroy_bioset();
5619 free_bio_entry_cache:
5620 	f2fs_destroy_bio_entry_cache();
5621 free_iostat:
5622 	f2fs_destroy_iostat_processing();
5623 free_post_read:
5624 	f2fs_destroy_post_read_processing();
5625 free_root_stats:
5626 	f2fs_destroy_root_stats();
5627 	f2fs_exit_shrinker();
5628 free_sysfs:
5629 	f2fs_exit_sysfs();
5630 free_garbage_collection_cache:
5631 	f2fs_destroy_garbage_collection_cache();
5632 free_extent_cache:
5633 	f2fs_destroy_extent_cache();
5634 free_recovery_cache:
5635 	f2fs_destroy_recovery_cache();
5636 free_checkpoint_caches:
5637 	f2fs_destroy_checkpoint_caches();
5638 free_segment_manager_caches:
5639 	f2fs_destroy_segment_manager_caches();
5640 free_node_manager_caches:
5641 	f2fs_destroy_node_manager_caches();
5642 free_inodecache:
5643 	destroy_inodecache();
5644 fail:
5645 	return err;
5646 }
5647 
5648 static void __exit exit_f2fs_fs(void)
5649 {
5650 	unregister_filesystem(&f2fs_fs_type);
5651 	f2fs_destroy_xattr_cache();
5652 	f2fs_destroy_casefold_cache();
5653 	f2fs_destroy_compress_cache();
5654 	f2fs_destroy_compress_mempool();
5655 	f2fs_destroy_bioset();
5656 	f2fs_destroy_bio_entry_cache();
5657 	f2fs_destroy_iostat_processing();
5658 	f2fs_destroy_post_read_processing();
5659 	f2fs_destroy_root_stats();
5660 	f2fs_exit_shrinker();
5661 	f2fs_exit_sysfs();
5662 	f2fs_destroy_garbage_collection_cache();
5663 	f2fs_destroy_extent_cache();
5664 	f2fs_destroy_recovery_cache();
5665 	f2fs_destroy_checkpoint_caches();
5666 	f2fs_destroy_segment_manager_caches();
5667 	f2fs_destroy_node_manager_caches();
5668 	destroy_inodecache();
5669 }
5670 
5671 module_init(init_f2fs_fs)
5672 module_exit(exit_f2fs_fs)
5673 
5674 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
5675 MODULE_DESCRIPTION("Flash Friendly File System");
5676 MODULE_LICENSE("GPL");
5677