/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <linux/rw_hint.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif
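
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * with CONFIG_F2FS_CHECK_FS a failed invariant crashes immediately via
 * BUG_ON(); otherwise it only warns and flags the filesystem so that
 * fsck.f2fs will repair it on the next boot, e.g.:
 *
 *	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
 */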

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete, since bio_alloc() never fails */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_BLKADDR_VALIDITY,
	FAULT_BLKADDR_CONSISTENCE,
	FAULT_NO_SEGMENT,
	FAULT_INCONSISTENT_FOOTER,
	FAULT_TIMEOUT,
	FAULT_VMALLOC,
	FAULT_MAX,
};

/* indicate which option to update */
enum fault_option {
	FAULT_RATE	= 1,	/* only update fault rate */
	FAULT_TYPE	= 2,	/* only update fault type */
	FAULT_ALL	= 4,	/* reset all fault injection options/stats */
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info {
	atomic_t inject_ops;
	int inject_rate;
	unsigned int inject_type;
	/* Used to account total count of injection for each type */
	unsigned int inject_count[FAULT_MAX];
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))

/* maximum retry count for injected failure */
#define DEFAULT_FAILURE_RETRY_COUNT		8
#else
#define DEFAULT_FAILURE_RETRY_COUNT		1
#endif
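
/*
 * Usage sketch (editor's illustration; the exact helper in f2fs differs):
 * a fault site checks whether its type bit is enabled and counts down
 * inject_ops; when the counter expires, the fault fires and the counter
 * is re-armed with inject_rate:
 *
 *	if (IS_FAULT_SET(fi, FAULT_KMALLOC) &&
 *	    atomic_dec_and_test(&fi->inject_ops)) {
 *		atomic_set(&fi->inject_ops, fi->inject_rate);
 *		fi->inject_count[FAULT_KMALLOC]++;
 *		return NULL;	// pretend the allocation failed
 *	}
 */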

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000001
#define F2FS_MOUNT_DISCARD		0x00000002
#define F2FS_MOUNT_NOHEAP		0x00000004
#define F2FS_MOUNT_XATTR_USER		0x00000008
#define F2FS_MOUNT_POSIX_ACL		0x00000010
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000020
#define F2FS_MOUNT_INLINE_XATTR		0x00000040
#define F2FS_MOUNT_INLINE_DATA		0x00000080
#define F2FS_MOUNT_INLINE_DENTRY	0x00000100
#define F2FS_MOUNT_FLUSH_MERGE		0x00000200
#define F2FS_MOUNT_NOBARRIER		0x00000400
#define F2FS_MOUNT_FASTBOOT		0x00000800
#define F2FS_MOUNT_READ_EXTENT_CACHE	0x00001000
#define F2FS_MOUNT_DATA_FLUSH		0x00002000
#define F2FS_MOUNT_FAULT_INJECTION	0x00004000
#define F2FS_MOUNT_USRQUOTA		0x00008000
#define F2FS_MOUNT_GRPQUOTA		0x00010000
#define F2FS_MOUNT_PRJQUOTA		0x00020000
#define F2FS_MOUNT_QUOTA		0x00040000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00080000
#define F2FS_MOUNT_RESERVE_ROOT		0x00100000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x00200000
#define F2FS_MOUNT_NORECOVERY		0x00400000
#define F2FS_MOUNT_ATGC			0x00800000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x01000000
#define F2FS_MOUNT_GC_MERGE		0x02000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x04000000
#define F2FS_MOUNT_AGE_EXTENT_CACHE	0x08000000
#define F2FS_MOUNT_NAT_BITS		0x10000000
#define F2FS_MOUNT_INLINECRYPT		0x20000000
/*
 * Some f2fs environments expect to be able to pass the "lazytime" option
 * string rather than using the MS_LAZYTIME flag, so this must remain.
 */
#define F2FS_MOUNT_LAZYTIME		0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
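
/*
 * Example (editor's illustration): options are single bits in
 * F2FS_OPTION(sbi).opt, so toggling and testing are constant-time mask
 * operations:
 *
 *	set_opt(sbi, DISCARD);		// opt |= F2FS_MOUNT_DISCARD
 *	if (test_opt(sbi, DISCARD))
 *		... issue discard commands ...
 *	clear_opt(sbi, DISCARD);	// opt &= ~F2FS_MOUNT_DISCARD
 */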

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
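
/*
 * Worked example (editor's note): the signed cast keeps the comparison
 * correct across u64 wraparound. For instance, ver_after(0, ULLONG_MAX)
 * is true because (long long)(0 - ULLONG_MAX) == 1 > 0, i.e. version 0
 * is treated as one step after ULLONG_MAX.
 */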

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

enum blkzone_allocation_policy {
	BLKZONE_ALLOC_PRIOR_SEQ,	/* Prioritize writing to sequential zones */
	BLKZONE_ALLOC_ONLY_SEQ,		/* Only allow writing to sequential zones */
	BLKZONE_ALLOC_PRIOR_CONV,	/* Prioritize writing to conventional zones */
};

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader would otherwise
 * acquire the read lock while the write lock is needed by higher-priority
 * clients.
 */

struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_queue_head_t read_waiters;
#endif
};
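
/*
 * Sketch of the unfair read path (editor's illustration, assuming the
 * f2fs_down_read() helper defined later in this header): readers retry a
 * trylock and park on read_waiters whenever it fails, so a contending
 * writer is never queued behind a stream of readers:
 *
 *	wait_event(sem->read_waiters,
 *		   down_read_trylock(&sem->internal_rwsem));
 */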

struct f2fs_mount_info {
	unsigned int opt;
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int memory_mode;		/* memory mode */
	int errors;			/* errors parameter */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* not-to-compress extensions */
};

#define F2FS_FEATURE_ENCRYPT			0x00000001
#define F2FS_FEATURE_BLKZONED			0x00000002
#define F2FS_FEATURE_ATOMIC_WRITE		0x00000004
#define F2FS_FEATURE_EXTRA_ATTR			0x00000008
#define F2FS_FEATURE_PRJQUOTA			0x00000010
#define F2FS_FEATURE_INODE_CHKSUM		0x00000020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x00000040
#define F2FS_FEATURE_QUOTA_INO			0x00000080
#define F2FS_FEATURE_INODE_CRTIME		0x00000100
#define F2FS_FEATURE_LOST_FOUND			0x00000200
#define F2FS_FEATURE_VERITY			0x00000400
#define F2FS_FEATURE_SB_CHKSUM			0x00000800
#define F2FS_FEATURE_CASEFOLD			0x00001000
#define F2FS_FEATURE_COMPRESSION		0x00002000
#define F2FS_FEATURE_RO				0x00004000
#define F2FS_FEATURE_DEVICE_ALIAS		0x00008000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
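
/*
 * Example (editor's illustration): the feature word is stored
 * little-endian in the on-disk superblock, so the mask is converted with
 * cpu_to_le32() before testing:
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		... the image was formatted with compression support ...
 */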

/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	DATA_GENERIC_ENHANCE_UPDATE,	/*
					 * strong check on range and segment
					 * bitmap for update case
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transaction dir ino list */
	XATTR_DIR_INO,		/* for xattr updated dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct folio *folio;	/* warm node folio pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for the checkpoint time stats below */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* minimum discard granularity, unit: block count */
#define MIN_DISCARD_GRANULARITY		1
/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16
/* default maximum discard granularity of ordered discard, unit: block count */
#define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY	16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
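
/*
 * Worked example (editor's note): pending discards are bucketed by
 * length, so plist_idx(1) == 0, plist_idx(16) == 15, and any request of
 * MAX_PLIST_NUM blocks or more clamps to the last bucket (511). Callers
 * must pass blk_num >= 1, since 0 would underflow.
 */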

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct discard_info di;		/* discard info */
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

enum {
	DPOLICY_IO_AWARE_DISABLE,	/* force to not be aware of IO */
	DPOLICY_IO_AWARE_ENABLE,	/* force to be aware of IO */
	DPOLICY_IO_AWARE_MAX,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum discard granularity that is not I/O-aware */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int max_discard_request;	/* max. discard request per round */
	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
	unsigned int discard_io_aware_gran; /* minimum discard granularity that is not I/O-aware */
	unsigned int discard_urgent_util;	/* utilization above which discards are issued proactively */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int max_ordered_discard;	/* maximum discard granularity issued by lba order */
	unsigned int discard_io_aware;		/* io_aware policy */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
	bool discard_wake;			/* to wake up discard thread */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
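
/*
 * Usage sketch (editor's illustration): before adding an entry to the
 * in-checkpoint journal, callers reserve space and take the returned
 * slot index; if the fixed-size area is full, entries must be flushed
 * to the NAT/SIT area instead:
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL)) {
 *		int idx = update_nats_in_cursum(journal, 1);
 *
 *		nid_in_journal(journal, idx) = cpu_to_le32(nid);
 *	}
 */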

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -	\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
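
/*
 * Worked example (editor's note, assuming 872 inline data slots, i.e.
 * MAX_INLINE_DATA == 3488 bytes): each dentry costs SIZE_OF_DIR_ENTRY +
 * F2FS_SLOT_LEN = 19 bytes = 152 payload bits plus 1 bitmap bit, so
 * NR_INLINE_DENTRY = 3488 * 8 / 153 = 182 entries, with a
 * DIV_ROUND_UP(182, 8) = 23-byte bitmap and the remaining 7 bytes
 * reserved.
 */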

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the original name is
	 * "." or "..", if the directory is both casefolded and encrypted and
	 * its encryption key is unavailable, or if the filesystem is doing an
	 * internal operation where usr_fname is also NULL.  In all these cases
	 * we fall back to treating the name as an opaque byte sequence.
	 */
	struct qstr cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}

/*
 * XATTR_NODE_OFFSET stores xattrs in one node block per file, using -1
 * as its node offset to distinguish it from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
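
/*
 * Worked bit arithmetic (editor's note): shifting the all-ones value
 * left and then right by OFFSET_BIT_SHIFT clears the top
 * OFFSET_BIT_SHIFT bits, so XATTR_NODE_OFFSET is the largest node
 * offset representable once the mark bits are excluded.
 */
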
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* timeout value injected, default: 1000ms */
#define DEFAULT_FAULT_TIMEOUT	(msecs_to_jiffies(1000))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

/* maximum retry of EIO'ed page */
#define MAX_RETRY_PAGE_EIO			100

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS		1

#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define READ_EXTENT_CACHE_SHRINK_NUMBER	128

/* number of age extent info in extent cache we try to shrink */
#define AGE_EXTENT_CACHE_SHRINK_NUMBER	128
#define LAST_AGE_WEIGHT			30
#define SAME_AGE_REGION			1024

/*
 * Define data blocks with age less than 1GB as hot data, and data blocks
 * with age less than 10GB but more than 1GB as warm data. The thresholds
 * below are in 4KB blocks: 262144 * 4KB = 1GB and 2621440 * 4KB = 10GB.
 */
#define DEF_HOT_DATA_AGE_THRESHOLD	262144
#define DEF_WARM_DATA_AGE_THRESHOLD	2621440

/* default max read extent count per inode */
#define DEF_MAX_READ_EXTENT_COUNT	10240

/* extent cache type */
enum extent_type {
	EX_READ,
	EX_BLOCK_AGE,
	NR_EXTENT_CACHES,
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	union {
		/* read extent_cache */
		struct {
			/* start block address of the extent */
			block_t blk;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			/* physical extent length of compressed blocks */
			unsigned int c_len;
#endif
		};
		/* block age extent_cache */
		struct {
			/* block age of the extent */
			unsigned long long age;
			/* last total blocks allocated */
			unsigned long long last_blocks;
		};
	};
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	enum extent_type type;		/* keep the extent tree type */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent nodes in rb-tree */
	bool largest_updated;		/* largest extent updated */
	struct extent_info largest;	/* largest cached extent for EX_READ */
};

struct extent_tree_info {
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */
};

/*
 * State of block returned by f2fs_map_blocks.
 */
#define F2FS_MAP_NEW		(1U << 0)
#define F2FS_MAP_MAPPED		(1U << 1)
#define F2FS_MAP_DELALLOC	(1U << 2)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_DELALLOC)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	unsigned long m_last_pblk;	/* last allocated block, only used for DIO in LFS mode */
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)

#define DEF_DIR_LEVEL		0

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_SKIP_WRITES,		/* should skip data page writeback */
	FI_OPU_WRITE,		/* used for opu per file */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_COW_FILE,		/* indicate COW file */
	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
	FI_ATOMIC_DIRTIED,	/* indicate atomic file is dirtied */
	FI_ATOMIC_REPLACE,	/* indicate atomic replace */
	FI_OPENED_FILE,		/* indicate file has been opened */
	FI_DONATE_FINISHED,	/* indicate page donation of file has been finished */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	union {
		unsigned int i_current_depth;	/* only for directory depth */
		unsigned short i_gc_failures;	/* for gc failure statistic */
	};
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* used below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	unsigned int ioprio_hint;	/* hint for IO priority */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	struct task_struct *wb_task;	/* indicate inode is in context of writeback */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t	last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot __rcu *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */

	/* linked in global inode list for cache donation */
	struct list_head gdonate_list;
	pgoff_t donate_start, donate_end; /* inclusive */
	atomic_t open_count;		/* # of open files */

	struct task_struct *atomic_write_task;	/* store atomic write task */
	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
					/* cached extent_tree entry */
	union {
		struct inode *cow_inode;	/* copy-on-write inode for atomic write */
		struct inode *atomic_inode;
					/* point to atomic_inode, available only for cow_inode */
	};

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[3];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned char i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */

	unsigned int atomic_write_cnt;
	loff_t original_i_size;		/* original i_size before atomic write */
#ifdef CONFIG_FS_ENCRYPTION
	struct fscrypt_inode_info *i_crypt_info; /* filesystem encryption info */
#endif
#ifdef CONFIG_FS_VERITY
	struct fsverity_info *i_verity_info; /* filesystem verity info */
#endif
};

static inline void get_read_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_read_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}
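
/*
 * Worked example (editor's note): back = {lstart = 100, len = 8} and
 * front = {lstart = 108, len = 4} are mergeable because 100 + 8 == 108
 * and the combined length 12 is within max_len; the result would be a
 * single discard covering lstart 100..111.
 */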

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;		/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct folio *inode_folio;	/* its inode folio, NULL is possible */
	struct folio *node_folio;	/* cached direct node folio */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_folio_locked;	/* inode folio is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct folio *ifolio, struct folio *nfolio, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_folio = ifolio;
	dn->node_folio = nfolio;
	dn->nid = nid;
}
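
/*
 * Usage sketch (editor's illustration): a typical caller initializes the
 * dnode on the stack and then resolves the direct node that covers a
 * given page index:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		// dn.data_blkaddr now holds the block address
 *		f2fs_put_dnode(&dn);
 *	}
 */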

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, the on-disk layout covers a maximum of 16 logs that consist
 * of 8 for data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum log_type {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(folio, f)			\
	(f || f2fs_is_cp_guaranteed(folio) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates in async mode.
 * NODE			Node pages. It operates in async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
#define PAGE_TYPE_ON_MAIN(type)	((type) == DATA || (type) == NODE)
enum page_type {
	DATA = 0,
	NODE = 1,	/* should not change this */
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	IPU,		/* the below types are used by tracepoints only. */
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
	CP_XATTR_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	APP_BUFFERED_CDATA_IO,		/* app buffered write IOs on compressed file */
	APP_MAPPED_CDATA_IO,		/* app mapped write IOs on compressed file */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_CDATA_IO,			/* data IOs from kworker/fsync/reclaimer on compressed file */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	APP_BUFFERED_CDATA_READ_IO,	/* app buffered read IOs on compressed file */
	APP_MAPPED_CDATA_READ_IO,	/* app mapped read IOs on compressed file */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD_IO,			/* discard */
	FS_FLUSH_IO,			/* flush */
	FS_ZONE_RESET_IO,		/* zone reset */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	enum req_op op;		/* contains REQ_OP_ */
	blk_opf_t op_flags;	/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before COW */
	union {
		struct page *page;	/* page to be written */
		struct folio *folio;
	};
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	unsigned int compr_blocks;	/* # of compressed block addresses */
	unsigned int need_lock:8;	/* indicate we need to lock cp_rwsem */
	unsigned int version:8;		/* version of the node */
	unsigned int submitted:1;	/* indicate IO submission */
	unsigned int in_list:1;		/* indicate fio is in io_list */
	unsigned int is_por:1;		/* indicate IO is from recovery or not */
	unsigned int encrypted:1;	/* indicate file is encrypted */
	unsigned int meta_gc:1;		/* require meta inode GC */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
#ifdef CONFIG_BLK_DEV_ZONED
	struct completion zone_wait;	/* condition value for the previous open zone to close */
	struct bio *zone_pending_bio;	/* pending bio for the previous zone */
	void *bi_private;		/* previous bi_private for pending bio */
#endif
	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct file *bdev_file;
	struct block_device *bdev;
	char path[MAX_PATH_LEN + 1];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	DONATE_INODE,			/* for all inode to donate pages */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

struct f2fs_gc_control {
	unsigned int victim_segno;	/* target victim segment number */
	int init_gc_type;		/* FG_GC or BG_GC */
	bool no_bg_gc;			/* check the space and stop bg_gc */
	bool should_migrate_blocks;	/* should migrate blocks */
	bool err_gc_skipped;		/* return EAGAIN if GC skipped */
	bool one_time;			/* require one time GC in one migration unit */
	unsigned int nr_free_secs;	/* # of free sections to do GC */
};

/*
 * For s_flag in struct f2fs_sb_info
 * Modifications to this enum should be kept in sync with the s_flag array
 */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in progress */
	SBI_IS_FREEZING,			/* freezefs is in progress */
	SBI_IS_WRITABLE,			/* remove ro mount option transiently */
	MAX_SBI_FLAG,
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

/* Note that this enum must be kept in sync with the gc_mode_names array */
enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	GC_URGENT_MID,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,		/* use both lfs/ssr allocation */
	FS_MODE_LFS,			/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,		/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,		/* block fragmentation mode */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync follows posix semantics, without barrier */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

enum {
	MEMORY_MODE_NORMAL,	/* memory mode for normal devices */
	MEMORY_MODE_LOW,	/* memory mode for low memory devices */
};

enum errors_option {
	MOUNT_ERRORS_READONLY,	/* remount fs ro on errors */
	MOUNT_ERRORS_CONTINUE,	/* continue on errors */
	MOUNT_ERRORS_PANIC,	/* panic on errors */
};

enum {
	BACKGROUND,
	FOREGROUND,
	MAX_CALL_TYPE,
	TOTAL_CALL = FOREGROUND,
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 2	PAGE_PRIVATE_INLINE_INODE
 * bit 3	PAGE_PRIVATE_REF_RESOURCE
 * bit 4	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 5-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
	PAGE_PRIVATE_MAX
};
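
/*
 * Example (editor's illustration): because PAGE_PRIVATE_NOT_POINTER is
 * bit 0, a flag can only be trusted after checking that the private
 * word is tagged as non-pointer data (Layout A):
 *
 *	unsigned long priv = page_private(page);
 *
 *	if ((priv & BIT(PAGE_PRIVATE_NOT_POINTER)) &&
 *	    (priv & BIT(PAGE_PRIVATE_INLINE_INODE)))
 *		... Layout A with the inline-inode flag set ...
 *
 * A value with bit 0 clear is Layout B, i.e. a plain pointer.
 */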
1484 
1485 /* For compression */
1486 enum compress_algorithm_type {
1487 	COMPRESS_LZO,
1488 	COMPRESS_LZ4,
1489 	COMPRESS_ZSTD,
1490 	COMPRESS_LZORLE,
1491 	COMPRESS_MAX,
1492 };
1493 
1494 enum compress_flag {
1495 	COMPRESS_CHKSUM,
1496 	COMPRESS_MAX_FLAG,
1497 };
1498 
1499 #define	COMPRESS_WATERMARK			20
1500 #define	COMPRESS_PERCENT			20
1501 
1502 #define COMPRESS_DATA_RESERVED_SIZE		4
1503 struct compress_data {
1504 	__le32 clen;			/* compressed data size */
1505 	__le32 chksum;			/* compressed data checksum */
1506 	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
1507 	u8 cdata[];			/* compressed data */
1508 };
1509 
1510 #define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))
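
/*
 * Worked example: clen and chksum are 4 bytes each and the reserved area
 * is 4 * 4 bytes, so COMPRESS_HEADER_SIZE evaluates to 4 + 4 + 16 = 24
 * bytes; the flexible cdata[] array contributes nothing to sizeof().
 */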
1511 
1512 #define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
1513 
1514 #define F2FS_ZSTD_DEFAULT_CLEVEL	1
1515 
1516 #define	COMPRESS_LEVEL_OFFSET	8
1517 
1518 /* compress context */
1519 struct compress_ctx {
1520 	struct inode *inode;		/* inode the context belongs to */
1521 	pgoff_t cluster_idx;		/* cluster index number */
1522 	unsigned int cluster_size;	/* page count in cluster */
1523 	unsigned int log_cluster_size;	/* log of cluster size */
1524 	struct page **rpages;		/* pages store raw data in cluster */
1525 	unsigned int nr_rpages;		/* total page number in rpages */
1526 	struct page **cpages;		/* pages store compressed data in cluster */
1527 	unsigned int nr_cpages;		/* total page number in cpages */
1528 	unsigned int valid_nr_cpages;	/* valid page number in cpages */
1529 	void *rbuf;			/* virtual mapped address on rpages */
1530 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1531 	size_t rlen;			/* valid data length in rbuf */
1532 	size_t clen;			/* valid data length in cbuf */
1533 	void *private;			/* payload buffer for specified compression algorithm */
1534 	void *private2;			/* extra payload buffer */
1535 };
1536 
1537 /* compress context for write IO path */
1538 struct compress_io_ctx {
1539 	u32 magic;			/* magic number to indicate page is compressed */
1540 	struct inode *inode;		/* inode the context belongs to */
1541 	struct page **rpages;		/* pages store raw data in cluster */
1542 	unsigned int nr_rpages;		/* total page number in rpages */
1543 	atomic_t pending_pages;		/* in-flight compressed page count */
1544 };
1545 
1546 /* Context for decompressing one cluster on the read IO path */
1547 struct decompress_io_ctx {
1548 	u32 magic;			/* magic number to indicate page is compressed */
1549 	struct inode *inode;		/* inode the context belongs to */
1550 	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
1551 	pgoff_t cluster_idx;		/* cluster index number */
1552 	unsigned int cluster_size;	/* page count in cluster */
1553 	unsigned int log_cluster_size;	/* log of cluster size */
1554 	struct page **rpages;		/* pages store raw data in cluster */
1555 	unsigned int nr_rpages;		/* total page number in rpages */
1556 	struct page **cpages;		/* pages store compressed data in cluster */
1557 	unsigned int nr_cpages;		/* total page number in cpages */
1558 	struct page **tpages;		/* temp pages to pad holes in cluster */
1559 	void *rbuf;			/* virtual mapped address on rpages */
1560 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1561 	size_t rlen;			/* valid data length in rbuf */
1562 	size_t clen;			/* valid data length in cbuf */
1563 
1564 	/*
1565 	 * The number of compressed pages remaining to be read in this cluster.
1566 	 * This is initially nr_cpages.  It is decremented by 1 each time a page
1567 	 * has been read (or failed to be read).  When it reaches 0, the cluster
1568 	 * is decompressed (or an error is reported).
1569 	 *
1570 	 * If an error occurs before all the pages have been submitted for I/O,
1571 	 * then this will never reach 0.  In this case the I/O submitter is
1572 	 * responsible for calling f2fs_decompress_end_io() instead.
1573 	 */
1574 	atomic_t remaining_pages;
1575 
1576 	/*
1577 	 * Number of references to this decompress_io_ctx.
1578 	 *
1579 	 * One reference is held for I/O completion.  This reference is dropped
1580 	 * after the pagecache pages are updated and unlocked -- either after
1581 	 * decompression (and verity if enabled), or after an error.
1582 	 *
1583 	 * In addition, each compressed page holds a reference while it is in a
1584 	 * bio.  These references are necessary to prevent compressed pages from
1585 	 * being freed while they are still in a bio.
1586 	 */
1587 	refcount_t refcnt;
1588 
1589 	bool failed;			/* IO error occurred before decompression? */
1590 	bool need_verity;		/* need fs-verity verification after decompression? */
1591 	unsigned char compress_algorithm;	/* backup algorithm type */
1592 	void *private;			/* payload buffer for specified decompression algorithm */
1593 	void *private2;			/* extra payload buffer */
1594 	struct work_struct verity_work;	/* work to verify the decompressed pages */
1595 	struct work_struct free_work;	/* work to free this structure later */
1596 };
1597 
1598 #define NULL_CLUSTER			((unsigned int)(~0))
1599 #define MIN_COMPRESS_LOG_SIZE		2
1600 #define MAX_COMPRESS_LOG_SIZE		8
1601 #define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
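
/*
 * Worked example: assuming 4KiB pages, MAX_COMPRESS_WINDOW_SIZE(2) is
 * 4KiB << 2 = 16KiB, i.e. the window covering a minimum-sized cluster of
 * 1 << MIN_COMPRESS_LOG_SIZE = 4 pages.
 */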
1602 
1603 struct f2fs_sb_info {
1604 	struct super_block *sb;			/* pointer to VFS super block */
1605 	struct proc_dir_entry *s_proc;		/* proc entry */
1606 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
1607 	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
1608 	int valid_super_block;			/* valid super block no */
1609 	unsigned long s_flag;			/* flags for sbi */
1610 	struct mutex writepages;		/* mutex for writepages() */
1611 
1612 #ifdef CONFIG_BLK_DEV_ZONED
1613 	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
1614 	unsigned int max_open_zones;		/* max open zone resources of the zoned device */
1615 	/* For adjusting the priority of the data write position in zoned UFS */
1616 	unsigned int blkzone_alloc_policy;
1617 #endif
1618 
1619 	/* for node-related operations */
1620 	struct f2fs_nm_info *nm_info;		/* node manager */
1621 	struct inode *node_inode;		/* cache node blocks */
1622 
1623 	/* for segment-related operations */
1624 	struct f2fs_sm_info *sm_info;		/* segment manager */
1625 
1626 	/* for bio operations */
1627 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
1628 	/* keep migration IO order for LFS mode */
1629 	struct f2fs_rwsem io_order_lock;
1630 	pgoff_t page_eio_ofs[NR_PAGE_TYPE];	/* EIO page offset */
1631 	int page_eio_cnt[NR_PAGE_TYPE];		/* EIO count */
1632 
1633 	/* for checkpoint */
1634 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
1635 	int cur_cp_pack;			/* current cp pack (1 or 2) */
1636 	spinlock_t cp_lock;			/* for flag in ckpt */
1637 	struct inode *meta_inode;		/* cache meta blocks */
1638 	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
1639 	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
1640 	struct f2fs_rwsem node_write;		/* locking node writes */
1641 	struct f2fs_rwsem node_change;	/* locking node change */
1642 	wait_queue_head_t cp_wait;
1643 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
1644 	long interval_time[MAX_TIME];		/* to store thresholds */
1645 	struct ckpt_req_control cprc_info;	/* for checkpoint request control */
1646 
1647 	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */
1648 
1649 	spinlock_t fsync_node_lock;		/* for node entry lock */
1650 	struct list_head fsync_node_list;	/* node list head */
1651 	unsigned int fsync_seg_id;		/* sequence id */
1652 	unsigned int fsync_node_num;		/* number of node entries */
1653 
1654 	/* for orphan inodes, use the 0'th array */
1655 	unsigned int max_orphans;		/* max orphan inodes */
1656 
1657 	/* for inode management */
1658 	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
1659 	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
1660 	struct mutex flush_lock;		/* for flush exclusion */
1661 
1662 	/* for extent tree cache */
1663 	struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
1664 	atomic64_t allocated_data_blocks;	/* for block age extent_cache */
1665 	unsigned int max_read_extent_count;	/* max read extent count per inode */
1666 
1667 	/* The thresholds used for hot and warm data separation */
1668 	unsigned int hot_data_age_threshold;
1669 	unsigned int warm_data_age_threshold;
1670 	unsigned int last_age_weight;
1671 
1672 	/* control donate caches */
1673 	unsigned int donate_files;
1674 
1675 	/* basic filesystem units */
1676 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
1677 	unsigned int log_blocksize;		/* log2 block size */
1678 	unsigned int blocksize;			/* block size */
1679 	unsigned int root_ino_num;		/* root inode number */
1680 	unsigned int node_ino_num;		/* node inode number */
1681 	unsigned int meta_ino_num;		/* meta inode number */
1682 	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
1683 	unsigned int blocks_per_seg;		/* blocks per segment */
1684 	unsigned int unusable_blocks_per_sec;	/* unusable blocks per section */
1685 	unsigned int segs_per_sec;		/* segments per section */
1686 	unsigned int secs_per_zone;		/* sections per zone */
1687 	unsigned int total_sections;		/* total section count */
1688 	unsigned int total_node_count;		/* total node block count */
1689 	unsigned int total_valid_node_count;	/* valid node block count */
1690 	int dir_level;				/* directory level */
1691 	bool readdir_ra;			/* readahead inode in readdir */
1692 	u64 max_io_bytes;			/* max io bytes to merge IOs */
1693 
1694 	block_t user_block_count;		/* # of user blocks */
1695 	block_t total_valid_block_count;	/* # of valid blocks */
1696 	block_t discard_blks;			/* discard command candidates */
1697 	block_t last_valid_block_count;		/* for recovery */
1698 	block_t reserved_blocks;		/* configurable reserved blocks */
1699 	block_t current_reserved_blocks;	/* current reserved blocks */
1700 
1701 	/* Additional tracking for no checkpoint mode */
1702 	block_t unusable_block_count;		/* # of blocks saved by last cp */
1703 
1704 	unsigned int nquota_files;		/* # of quota sysfiles */
1705 	struct f2fs_rwsem quota_sem;		/* blocking cp for flags */
1706 	struct task_struct *umount_lock_holder;	/* s_umount lock holder */
1707 
1708 	/* # of pages, see count_type */
1709 	atomic_t nr_pages[NR_COUNT_TYPE];
1710 	/* # of allocated blocks */
1711 	struct percpu_counter alloc_valid_block_count;
1712 	/* # of node block writes as roll forward recovery */
1713 	struct percpu_counter rf_node_block_count;
1714 
1715 	/* writeback control */
1716 	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */
1717 
1718 	/* valid inode count */
1719 	struct percpu_counter total_valid_inode_count;
1720 
1721 	struct f2fs_mount_info mount_opt;	/* mount options */
1722 
1723 	/* for cleaning operations */
1724 	struct f2fs_rwsem gc_lock;		/*
1725 						 * semaphore for GC; avoids races
1726 						 * between two GCs or between GC and CP
1727 						 */
1728 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
1729 	struct atgc_management am;		/* atgc management */
1730 	unsigned int cur_victim_sec;		/* current victim section num */
1731 	unsigned int gc_mode;			/* current GC state */
1732 	unsigned int next_victim_seg[2];	/* next segment in victim section */
1733 	spinlock_t gc_remaining_trials_lock;
1734 	/* remaining trial count for GC_URGENT_* and GC_IDLE_* */
1735 	unsigned int gc_remaining_trials;
1736 
1737 	/* for skip statistic */
1738 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
1739 
1740 	/* free sections reserved for pinned file */
1741 	unsigned int reserved_pin_section;
1742 
1743 	/* threshold for gc trials on pinned files */
1744 	unsigned short gc_pin_file_threshold;
1745 	struct f2fs_rwsem pin_sem;
1746 
1747 	/* maximum # of trials to find a victim segment for SSR and GC */
1748 	unsigned int max_victim_search;
1749 	/* migration granularity of garbage collection, unit: segment */
1750 	unsigned int migration_granularity;
1751 	/* migration window granularity of garbage collection, unit: segment */
1752 	unsigned int migration_window_granularity;
1753 
1754 	/*
1755 	 * for stat information.
1756 	 * in each two-entry array below, one slot is for the LFS mode and the other is for the SSR mode.
1757 	 */
1758 #ifdef CONFIG_F2FS_STAT_FS
1759 	struct f2fs_stat_info *stat_info;	/* FS status information */
1760 	atomic_t meta_count[META_MAX];		/* # of meta blocks */
1761 	unsigned int segment_count[2];		/* # of allocated segments */
1762 	unsigned int block_count[2];		/* # of allocated blocks */
1763 	atomic_t inplace_count;		/* # of inplace update */
1764 	/* # of lookup extent cache */
1765 	atomic64_t total_hit_ext[NR_EXTENT_CACHES];
1766 	/* # of hit rbtree extent node */
1767 	atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
1768 	/* # of hit cached extent node */
1769 	atomic64_t read_hit_cached[NR_EXTENT_CACHES];
1770 	/* # of hit largest extent node in read extent cache */
1771 	atomic64_t read_hit_largest;
1772 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
1773 	atomic_t inline_inode;			/* # of inline_data inodes */
1774 	atomic_t inline_dir;			/* # of inline_dentry inodes */
1775 	atomic_t compr_inode;			/* # of compressed inodes */
1776 	atomic64_t compr_blocks;		/* # of compressed blocks */
1777 	atomic_t swapfile_inode;		/* # of swapfile inodes */
1778 	atomic_t atomic_files;			/* # of opened atomic files */
1779 	atomic_t max_aw_cnt;			/* max # of atomic writes */
1780 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
1781 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
1782 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
1783 	atomic_t cp_call_count[MAX_CALL_TYPE];	/* # of cp calls */
1784 #endif
1785 	spinlock_t stat_lock;			/* lock for stat operations */
1786 
1787 	/* to attach REQ_META|REQ_FUA flags */
1788 	unsigned int data_io_flag;
1789 	unsigned int node_io_flag;
1790 
1791 	/* For sysfs support */
1792 	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
1793 	struct completion s_kobj_unregister;
1794 
1795 	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
1796 	struct completion s_stat_kobj_unregister;
1797 
1798 	struct kobject s_feature_list_kobj;		/* /sys/fs/f2fs/<devname>/feature_list */
1799 	struct completion s_feature_list_kobj_unregister;
1800 
1801 	/* For shrinker support */
1802 	struct list_head s_list;
1803 	struct mutex umount_mutex;
1804 	unsigned int shrinker_run_no;
1805 
1806 	/* For multi devices */
1807 	int s_ndevs;				/* number of devices */
1808 	struct f2fs_dev_info *devs;		/* for device list */
1809 	unsigned int dirty_device;		/* for checkpoint data flush */
1810 	spinlock_t dev_lock;			/* protect dirty_device */
1811 	bool aligned_blksize;			/* all devices have the same logical blksize */
1812 	unsigned int first_seq_zone_segno;	/* first segno in sequential zone */
1813 
1814 	/* For write statistics */
1815 	u64 sectors_written_start;
1816 	u64 kbytes_written;
1817 
1818 	/* Precomputed FS UUID checksum for seeding other checksums */
1819 	__u32 s_chksum_seed;
1820 
1821 	struct workqueue_struct *post_read_wq;	/* post read workqueue */
1822 
1823 	/*
1824 	 * If we are in irq context, let's update error information into
1825 	 * on-disk superblock in the work.
1826 	 */
1827 	struct work_struct s_error_work;
1828 	unsigned char errors[MAX_F2FS_ERRORS];		/* error flags */
1829 	unsigned char stop_reason[MAX_STOP_REASON];	/* stop reason */
1830 	spinlock_t error_lock;			/* protect errors/stop_reason array */
1831 	bool error_dirty;			/* error info in sb is dirty */
1832 
1833 	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
1834 	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
1835 
1836 	/* For reclaimed segs statistics per each GC mode */
1837 	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
1838 	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */
1839 
1840 	unsigned long seq_file_ra_mul;		/* multiplier for ra_pages of seq. files in fadvise */
1841 
1842 	int max_fragment_chunk;			/* max chunk size for block fragmentation mode */
1843 	int max_fragment_hole;			/* max hole size for block fragmentation mode */
1844 
1845 	/* For atomic write statistics */
1846 	atomic64_t current_atomic_write;
1847 	s64 peak_atomic_write;
1848 	u64 committed_atomic_block;
1849 	u64 revoked_atomic_block;
1850 
1851 	/* carve out reserved_blocks from total blocks */
1852 	bool carve_out;
1853 
1854 #ifdef CONFIG_F2FS_FS_COMPRESSION
1855 	struct kmem_cache *page_array_slab;	/* page array entry */
1856 	unsigned int page_array_slab_size;	/* default page array slab size */
1857 
1858 	/* For runtime compression statistics */
1859 	u64 compr_written_block;
1860 	u64 compr_saved_block;
1861 	u32 compr_new_inode;
1862 
1863 	/* For compressed block cache */
1864 	struct inode *compress_inode;		/* cache compressed blocks */
1865 	unsigned int compress_percent;		/* cache page percentage */
1866 	unsigned int compress_watermark;	/* cache page watermark */
1867 	atomic_t compress_page_hit;		/* cache hit count */
1868 #endif
1869 
1870 #ifdef CONFIG_F2FS_IOSTAT
1871 	/* For app/fs IO statistics */
1872 	spinlock_t iostat_lock;
1873 	unsigned long long iostat_count[NR_IO_TYPE];
1874 	unsigned long long iostat_bytes[NR_IO_TYPE];
1875 	unsigned long long prev_iostat_bytes[NR_IO_TYPE];
1876 	bool iostat_enable;
1877 	unsigned long iostat_next_period;
1878 	unsigned int iostat_period_ms;
1879 
1880 	/* For io latency related statistics info in one iostat period */
1881 	spinlock_t iostat_lat_lock;
1882 	struct iostat_lat_info *iostat_io_lat;
1883 #endif
1884 };
1885 
1886 /* Definitions to access f2fs_sb_info */
1887 #define SEGS_TO_BLKS(sbi, segs)					\
1888 		((segs) << (sbi)->log_blocks_per_seg)
1889 #define BLKS_TO_SEGS(sbi, blks)					\
1890 		((blks) >> (sbi)->log_blocks_per_seg)
1891 
1892 #define BLKS_PER_SEG(sbi)	((sbi)->blocks_per_seg)
1893 #define BLKS_PER_SEC(sbi)	(SEGS_TO_BLKS(sbi, (sbi)->segs_per_sec))
1894 #define SEGS_PER_SEC(sbi)	((sbi)->segs_per_sec)
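
/*
 * Worked example: with the common 2MiB segment of 4KiB blocks,
 * log_blocks_per_seg is 9, so SEGS_TO_BLKS(sbi, 4) == 4 << 9 == 2048
 * blocks and BLKS_TO_SEGS(sbi, 2048) == 4 segments; shifts suffice
 * because segments are power-of-two sized.
 */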
1895 
1896 __printf(3, 4)
1897 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...);
1898 
1899 #define f2fs_err(sbi, fmt, ...)						\
1900 	f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__)
1901 #define f2fs_warn(sbi, fmt, ...)					\
1902 	f2fs_printk(sbi, false, KERN_WARNING fmt, ##__VA_ARGS__)
1903 #define f2fs_notice(sbi, fmt, ...)					\
1904 	f2fs_printk(sbi, false, KERN_NOTICE fmt, ##__VA_ARGS__)
1905 #define f2fs_info(sbi, fmt, ...)					\
1906 	f2fs_printk(sbi, false, KERN_INFO fmt, ##__VA_ARGS__)
1907 #define f2fs_debug(sbi, fmt, ...)					\
1908 	f2fs_printk(sbi, false, KERN_DEBUG fmt, ##__VA_ARGS__)
1909 
1910 #define f2fs_err_ratelimited(sbi, fmt, ...)				\
1911 	f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__)
1912 #define f2fs_warn_ratelimited(sbi, fmt, ...)				\
1913 	f2fs_printk(sbi, true, KERN_WARNING fmt, ##__VA_ARGS__)
1914 #define f2fs_info_ratelimited(sbi, fmt, ...)				\
1915 	f2fs_printk(sbi, true, KERN_INFO fmt, ##__VA_ARGS__)
1916 
1917 #ifdef CONFIG_F2FS_FAULT_INJECTION
1918 #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__,	\
1919 									__builtin_return_address(0))
1920 static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
1921 				const char *func, const char *parent_func)
1922 {
1923 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1924 
1925 	if (!ffi->inject_rate)
1926 		return false;
1927 
1928 	if (!IS_FAULT_SET(ffi, type))
1929 		return false;
1930 
1931 	atomic_inc(&ffi->inject_ops);
1932 	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1933 		atomic_set(&ffi->inject_ops, 0);
1934 		ffi->inject_count[type]++;
1935 		f2fs_info_ratelimited(sbi, "inject %s in %s of %pS",
1936 				f2fs_fault_name[type], func, parent_func);
1937 		return true;
1938 	}
1939 	return false;
1940 }
1941 #else
1942 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1943 {
1944 	return false;
1945 }
1946 #endif
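
/*
 * Typical call pattern (sketch): allocation paths test the injector
 * before doing the real work, e.g.
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC))
 *		return NULL;
 *	return kmalloc(size, flags);
 *
 * so failures can be simulated without touching the allocator itself.
 */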
1947 
1948 /*
1949  * Test if the mounted volume is a multi-device volume.
1950  *   - For a single regular disk volume, sbi->s_ndevs is 0.
1951  *   - For a single zoned disk volume, sbi->s_ndevs is 1.
1952  *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1953  */
1954 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1955 {
1956 	return sbi->s_ndevs > 1;
1957 }
1958 
1959 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1960 {
1961 	unsigned long now = jiffies;
1962 
1963 	sbi->last_time[type] = now;
1964 
1965 	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1966 	if (type == REQ_TIME) {
1967 		sbi->last_time[DISCARD_TIME] = now;
1968 		sbi->last_time[GC_TIME] = now;
1969 	}
1970 }
1971 
1972 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1973 {
1974 	unsigned long interval = sbi->interval_time[type] * HZ;
1975 
1976 	return time_after(jiffies, sbi->last_time[type] + interval);
1977 }
1978 
1979 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1980 						int type)
1981 {
1982 	unsigned long interval = sbi->interval_time[type] * HZ;
1983 	unsigned int wait_ms = 0;
1984 	long delta;
1985 
1986 	delta = (sbi->last_time[type] + interval) - jiffies;
1987 	if (delta > 0)
1988 		wait_ms = jiffies_to_msecs(delta);
1989 
1990 	return wait_ms;
1991 }
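
/*
 * Usage sketch: I/O paths record activity via f2fs_update_time(sbi,
 * REQ_TIME), which also refreshes DISCARD_TIME and GC_TIME; background
 * threads then poll f2fs_time_over(sbi, GC_TIME), or sleep for
 * f2fs_time_to_wait(sbi, GC_TIME) milliseconds, to decide whether the
 * filesystem has been idle long enough to start cleaning.
 */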
1992 
1993 /*
1994  * Inline functions
1995  */
1996 static inline u32 __f2fs_crc32(u32 crc, const void *address,
1997 			       unsigned int length)
1998 {
1999 	return crc32(crc, address, length);
2000 }
2001 
2002 static inline u32 f2fs_crc32(const void *address, unsigned int length)
2003 {
2004 	return __f2fs_crc32(F2FS_SUPER_MAGIC, address, length);
2005 }
2006 
2007 static inline u32 f2fs_chksum(u32 crc, const void *address, unsigned int length)
2008 {
2009 	return __f2fs_crc32(crc, address, length);
2010 }
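
/*
 * Usage sketch (hypothetical caller): f2fs_crc32() seeds the CRC with
 * F2FS_SUPER_MAGIC, and f2fs_chksum() lets a caller chain the checksum
 * over multiple buffers by feeding the previous value back in:
 *
 *	u32 crc = f2fs_crc32(buf, len);
 *	crc = f2fs_chksum(crc, more, more_len);
 */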
2011 
2012 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
2013 {
2014 	return container_of(inode, struct f2fs_inode_info, vfs_inode);
2015 }
2016 
2017 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
2018 {
2019 	return sb->s_fs_info;
2020 }
2021 
2022 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
2023 {
2024 	return F2FS_SB(inode->i_sb);
2025 }
2026 
2027 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
2028 {
2029 	return F2FS_I_SB(mapping->host);
2030 }
2031 
2032 static inline struct f2fs_sb_info *F2FS_F_SB(const struct folio *folio)
2033 {
2034 	return F2FS_M_SB(folio->mapping);
2035 }
2036 
2037 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
2038 {
2039 	return (struct f2fs_super_block *)(sbi->raw_super);
2040 }
2041 
2042 static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio,
2043 								pgoff_t index)
2044 {
2045 	pgoff_t idx_in_folio = index % (1 << folio_order(folio));
2046 
2047 	return (struct f2fs_super_block *)
2048 		(page_address(folio_page(folio, idx_in_folio)) +
2049 						F2FS_SUPER_OFFSET);
2050 }
2051 
2052 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
2053 {
2054 	return (struct f2fs_checkpoint *)(sbi->ckpt);
2055 }
2056 
2057 static inline struct f2fs_node *F2FS_NODE(const struct folio *folio)
2058 {
2059 	return (struct f2fs_node *)folio_address(folio);
2060 }
2061 
2062 static inline struct f2fs_inode *F2FS_INODE(const struct folio *folio)
2063 {
2064 	return &((struct f2fs_node *)folio_address(folio))->i;
2065 }
2066 
2067 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
2068 {
2069 	return (struct f2fs_nm_info *)(sbi->nm_info);
2070 }
2071 
2072 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
2073 {
2074 	return (struct f2fs_sm_info *)(sbi->sm_info);
2075 }
2076 
2077 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
2078 {
2079 	return (struct sit_info *)(SM_I(sbi)->sit_info);
2080 }
2081 
2082 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
2083 {
2084 	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
2085 }
2086 
2087 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
2088 {
2089 	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
2090 }
2091 
2092 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
2093 {
2094 	return sbi->meta_inode->i_mapping;
2095 }
2096 
2097 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
2098 {
2099 	return sbi->node_inode->i_mapping;
2100 }
2101 
2102 static inline bool is_meta_folio(struct folio *folio)
2103 {
2104 	return folio->mapping == META_MAPPING(F2FS_F_SB(folio));
2105 }
2106 
2107 static inline bool is_node_folio(struct folio *folio)
2108 {
2109 	return folio->mapping == NODE_MAPPING(F2FS_F_SB(folio));
2110 }
2111 
2112 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
2113 {
2114 	return test_bit(type, &sbi->s_flag);
2115 }
2116 
2117 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2118 {
2119 	set_bit(type, &sbi->s_flag);
2120 }
2121 
2122 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2123 {
2124 	clear_bit(type, &sbi->s_flag);
2125 }
2126 
2127 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
2128 {
2129 	return le64_to_cpu(cp->checkpoint_ver);
2130 }
2131 
2132 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
2133 {
2134 	if (type < F2FS_MAX_QUOTAS)
2135 		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
2136 	return 0;
2137 }
2138 
2139 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
2140 {
2141 	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
2142 	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
2143 }
2144 
2145 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2146 {
2147 	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2148 
2149 	return ckpt_flags & f;
2150 }
2151 
2152 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2153 {
2154 	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
2155 }
2156 
2157 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2158 {
2159 	unsigned int ckpt_flags;
2160 
2161 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2162 	ckpt_flags |= f;
2163 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2164 }
2165 
2166 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2167 {
2168 	unsigned long flags;
2169 
2170 	spin_lock_irqsave(&sbi->cp_lock, flags);
2171 	__set_ckpt_flags(F2FS_CKPT(sbi), f);
2172 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2173 }
2174 
2175 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2176 {
2177 	unsigned int ckpt_flags;
2178 
2179 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2180 	ckpt_flags &= (~f);
2181 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2182 }
2183 
2184 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2185 {
2186 	unsigned long flags;
2187 
2188 	spin_lock_irqsave(&sbi->cp_lock, flags);
2189 	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
2190 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2191 }
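
/*
 * Usage sketch: callers normally go through the locked wrappers, e.g.
 * set_ckpt_flags(sbi, CP_FSCK_FLAG) to request an fsck at the next
 * checkpoint; the __-prefixed variants are for contexts that already
 * hold cp_lock (see disable_nat_bits() below).
 */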
2192 
2193 #define init_f2fs_rwsem(sem)					\
2194 do {								\
2195 	static struct lock_class_key __key;			\
2196 								\
2197 	__init_f2fs_rwsem((sem), #sem, &__key);			\
2198 } while (0)
2199 
2200 static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
2201 		const char *sem_name, struct lock_class_key *key)
2202 {
2203 	__init_rwsem(&sem->internal_rwsem, sem_name, key);
2204 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2205 	init_waitqueue_head(&sem->read_waiters);
2206 #endif
2207 }
2208 
2209 static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
2210 {
2211 	return rwsem_is_locked(&sem->internal_rwsem);
2212 }
2213 
2214 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
2215 {
2216 	return rwsem_is_contended(&sem->internal_rwsem);
2217 }
2218 
2219 static inline void f2fs_down_read(struct f2fs_rwsem *sem)
2220 {
2221 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2222 	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
2223 #else
2224 	down_read(&sem->internal_rwsem);
2225 #endif
2226 }
2227 
2228 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
2229 {
2230 	return down_read_trylock(&sem->internal_rwsem);
2231 }
2232 
2233 static inline void f2fs_up_read(struct f2fs_rwsem *sem)
2234 {
2235 	up_read(&sem->internal_rwsem);
2236 }
2237 
2238 static inline void f2fs_down_write(struct f2fs_rwsem *sem)
2239 {
2240 	down_write(&sem->internal_rwsem);
2241 }
2242 
2243 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2244 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
2245 {
2246 	down_read_nested(&sem->internal_rwsem, subclass);
2247 }
2248 
2249 static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
2250 {
2251 	down_write_nested(&sem->internal_rwsem, subclass);
2252 }
2253 #else
2254 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
2255 #define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
2256 #endif
2257 
2258 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
2259 {
2260 	return down_write_trylock(&sem->internal_rwsem);
2261 }
2262 
2263 static inline void f2fs_up_write(struct f2fs_rwsem *sem)
2264 {
2265 	up_write(&sem->internal_rwsem);
2266 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2267 	wake_up_all(&sem->read_waiters);
2268 #endif
2269 }
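
/*
 * Design note (sketch): with CONFIG_F2FS_UNFAIR_RWSEM, readers loop on
 * down_read_trylock() inside wait_event() rather than queueing fairly,
 * so f2fs_up_write() must wake_up_all() the read_waiters afterwards.
 * Callers use the wrappers like any rwsem:
 *
 *	f2fs_down_read(&sbi->cp_rwsem);
 *	... read-side critical section ...
 *	f2fs_up_read(&sbi->cp_rwsem);
 */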
2270 
2271 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
2272 {
2273 	unsigned long flags;
2274 	unsigned char *nat_bits;
2275 
2276 	/*
2277 	 * In order to re-enable nat_bits we would need to trigger fsck.f2fs
2278 	 * via set_sbi_flag(sbi, SBI_NEED_FSCK), but that may incur a huge
2279 	 * cost, so let's rely on a regular fsck or an unclean shutdown.
2280 	 */
2281 
2282 	if (lock)
2283 		spin_lock_irqsave(&sbi->cp_lock, flags);
2284 	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
2285 	nat_bits = NM_I(sbi)->nat_bits;
2286 	NM_I(sbi)->nat_bits = NULL;
2287 	if (lock)
2288 		spin_unlock_irqrestore(&sbi->cp_lock, flags);
2289 
2290 	kvfree(nat_bits);
2291 }
2292 
2293 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
2294 					struct cp_control *cpc)
2295 {
2296 	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
2297 
2298 	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
2299 }
2300 
2301 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
2302 {
2303 	f2fs_down_read(&sbi->cp_rwsem);
2304 }
2305 
2306 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
2307 {
2308 	if (time_to_inject(sbi, FAULT_LOCK_OP))
2309 		return 0;
2310 	return f2fs_down_read_trylock(&sbi->cp_rwsem);
2311 }
2312 
2313 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
2314 {
2315 	f2fs_up_read(&sbi->cp_rwsem);
2316 }
2317 
2318 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
2319 {
2320 	f2fs_down_write(&sbi->cp_rwsem);
2321 }
2322 
2323 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
2324 {
2325 	f2fs_up_write(&sbi->cp_rwsem);
2326 }
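
/*
 * Usage sketch: metadata updates bracket their block allocations with
 * f2fs_lock_op()/f2fs_unlock_op() so that checkpoint, which takes
 * cp_rwsem for write via f2fs_lock_all(), cannot run in the middle of
 * an update:
 *
 *	f2fs_lock_op(sbi);
 *	... allocate/modify node and data blocks ...
 *	f2fs_unlock_op(sbi);
 */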
2327 
2328 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
2329 {
2330 	int reason = CP_SYNC;
2331 
2332 	if (test_opt(sbi, FASTBOOT))
2333 		reason = CP_FASTBOOT;
2334 	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2335 		reason = CP_UMOUNT;
2336 	return reason;
2337 }
2338 
2339 static inline bool __remain_node_summaries(int reason)
2340 {
2341 	return (reason & (CP_UMOUNT | CP_FASTBOOT));
2342 }
2343 
2344 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
2345 {
2346 	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
2347 			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
2348 }
2349 
2350 /*
2351  * Check whether the inode has blocks or not
2352  */
2353 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
2354 {
2355 	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2356 
2357 	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
2358 }
2359 
2360 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2361 {
2362 	return ofs == XATTR_NODE_OFFSET;
2363 }
2364 
2365 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2366 					struct inode *inode, bool cap)
2367 {
2368 	if (!inode)
2369 		return true;
2370 	if (!test_opt(sbi, RESERVE_ROOT))
2371 		return false;
2372 	if (IS_NOQUOTA(inode))
2373 		return true;
2374 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2375 		return true;
2376 	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2377 					in_group_p(F2FS_OPTION(sbi).s_resgid))
2378 		return true;
2379 	if (cap && capable(CAP_SYS_RESOURCE))
2380 		return true;
2381 	return false;
2382 }
2383 
2384 static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
2385 						struct inode *inode, bool cap)
2386 {
2387 	block_t avail_user_block_count;
2388 
2389 	avail_user_block_count = sbi->user_block_count -
2390 					sbi->current_reserved_blocks;
2391 
2392 	if (!__allow_reserved_blocks(sbi, inode, cap))
2393 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2394 
2395 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2396 		if (avail_user_block_count > sbi->unusable_block_count)
2397 			avail_user_block_count -= sbi->unusable_block_count;
2398 		else
2399 			avail_user_block_count = 0;
2400 	}
2401 
2402 	return avail_user_block_count;
2403 }
2404 
2405 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2406 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2407 				 struct inode *inode, blkcnt_t *count, bool partial)
2408 {
2409 	long long diff = 0, release = 0;
2410 	block_t avail_user_block_count;
2411 	int ret;
2412 
2413 	ret = dquot_reserve_block(inode, *count);
2414 	if (ret)
2415 		return ret;
2416 
2417 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2418 		release = *count;
2419 		goto release_quota;
2420 	}
2421 
2422 	/*
2423 	 * let's increase this prior to the actual block count change so that
2424 	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
2425 	 */
2426 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2427 
2428 	spin_lock(&sbi->stat_lock);
2429 
2430 	avail_user_block_count = get_available_block_count(sbi, inode, true);
2431 	diff = (long long)sbi->total_valid_block_count + *count -
2432 						avail_user_block_count;
2433 	if (unlikely(diff > 0)) {
2434 		if (!partial) {
2435 			spin_unlock(&sbi->stat_lock);
2436 			release = *count;
2437 			goto enospc;
2438 		}
2439 		if (diff > *count)
2440 			diff = *count;
2441 		*count -= diff;
2442 		release = diff;
2443 		if (!*count) {
2444 			spin_unlock(&sbi->stat_lock);
2445 			goto enospc;
2446 		}
2447 	}
2448 	sbi->total_valid_block_count += (block_t)(*count);
2449 
2450 	spin_unlock(&sbi->stat_lock);
2451 
2452 	if (unlikely(release)) {
2453 		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2454 		dquot_release_reservation_block(inode, release);
2455 	}
2456 	f2fs_i_blocks_write(inode, *count, true, true);
2457 	return 0;
2458 
2459 enospc:
2460 	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2461 release_quota:
2462 	dquot_release_reservation_block(inode, release);
2463 	return -ENOSPC;
2464 }
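
/*
 * Usage sketch (hypothetical caller): with partial == true the request
 * may be trimmed rather than failed outright:
 *
 *	blkcnt_t count = 16;
 *
 *	if (!inc_valid_block_count(sbi, inode, &count, true))
 *		... proceed with the possibly-reduced count ...
 *
 * whereas partial == false returns -ENOSPC unless the full request fits.
 */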
2465 
2466 #define PAGE_PRIVATE_GET_FUNC(name, flagname) \
2467 static inline bool folio_test_f2fs_##name(const struct folio *folio)	\
2468 {									\
2469 	unsigned long priv = (unsigned long)folio->private;		\
2470 	unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) |		\
2471 			     (1UL << PAGE_PRIVATE_##flagname);		\
2472 	return (priv & v) == v;						\
2473 }									\
2474 static inline bool page_private_##name(struct page *page) \
2475 { \
2476 	return PagePrivate(page) && \
2477 		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
2478 		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2479 }
2480 
2481 #define PAGE_PRIVATE_SET_FUNC(name, flagname) \
2482 static inline void folio_set_f2fs_##name(struct folio *folio)		\
2483 {									\
2484 	unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) |		\
2485 			     (1UL << PAGE_PRIVATE_##flagname);		\
2486 	if (!folio->private)						\
2487 		folio_attach_private(folio, (void *)v);			\
2488 	else {								\
2489 		v |= (unsigned long)folio->private;			\
2490 		folio->private = (void *)v;				\
2491 	}								\
2492 }									\
2493 static inline void set_page_private_##name(struct page *page) \
2494 { \
2495 	if (!PagePrivate(page)) \
2496 		attach_page_private(page, (void *)0); \
2497 	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
2498 	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2499 }
2500 
2501 #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
2502 static inline void folio_clear_f2fs_##name(struct folio *folio)		\
2503 {									\
2504 	unsigned long v = (unsigned long)folio->private;		\
2505 									\
2506 	v &= ~(1UL << PAGE_PRIVATE_##flagname);				\
2507 	if (v == (1UL << PAGE_PRIVATE_NOT_POINTER))			\
2508 		folio_detach_private(folio);				\
2509 	else								\
2510 		folio->private = (void *)v;				\
2511 }									\
2512 static inline void clear_page_private_##name(struct page *page) \
2513 { \
2514 	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2515 	if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
2516 		detach_page_private(page); \
2517 }
2518 
2519 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
2520 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
2521 PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
2522 PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
2523 
2524 PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
2525 PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
2526 PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
2527 PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
2528 
2529 PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
2530 PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
2531 PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
2532 PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
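
/*
 * Usage sketch: the macros above expand to helpers such as
 * folio_set_f2fs_gcing(), folio_test_f2fs_gcing() and
 * folio_clear_f2fs_gcing(), so marking a folio under GC migration is:
 *
 *	folio_set_f2fs_gcing(folio);
 *	if (folio_test_f2fs_gcing(folio))
 *		... treat the folio as being migrated ...
 *	folio_clear_f2fs_gcing(folio);
 */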
2533 
2534 static inline unsigned long folio_get_f2fs_data(struct folio *folio)
2535 {
2536 	unsigned long data = (unsigned long)folio->private;
2537 
2538 	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
2539 		return 0;
2540 	return data >> PAGE_PRIVATE_MAX;
2541 }
2542 
2543 static inline void folio_set_f2fs_data(struct folio *folio, unsigned long data)
2544 {
2545 	data = (1UL << PAGE_PRIVATE_NOT_POINTER) | (data << PAGE_PRIVATE_MAX);
2546 
2547 	if (!folio_test_private(folio))
2548 		folio_attach_private(folio, (void *)data);
2549 	else
2550 		folio->private = (void *)((unsigned long)folio->private | data);
2551 }
2552 
2553 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2554 						struct inode *inode,
2555 						block_t count)
2556 {
2557 	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2558 
2559 	spin_lock(&sbi->stat_lock);
2560 	if (unlikely(sbi->total_valid_block_count < count)) {
2561 		f2fs_warn(sbi, "Inconsistent total_valid_block_count:%u, ino:%lu, count:%u",
2562 			  sbi->total_valid_block_count, inode->i_ino, count);
2563 		sbi->total_valid_block_count = 0;
2564 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2565 	} else {
2566 		sbi->total_valid_block_count -= count;
2567 	}
2568 	if (sbi->reserved_blocks &&
2569 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2570 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2571 					sbi->current_reserved_blocks + count);
2572 	spin_unlock(&sbi->stat_lock);
2573 	if (unlikely(inode->i_blocks < sectors)) {
2574 		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2575 			  inode->i_ino,
2576 			  (unsigned long long)inode->i_blocks,
2577 			  (unsigned long long)sectors);
2578 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2579 		return;
2580 	}
2581 	f2fs_i_blocks_write(inode, count, false, true);
2582 }
2583 
2584 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2585 {
2586 	atomic_inc(&sbi->nr_pages[count_type]);
2587 
2588 	if (count_type == F2FS_DIRTY_DENTS ||
2589 			count_type == F2FS_DIRTY_NODES ||
2590 			count_type == F2FS_DIRTY_META ||
2591 			count_type == F2FS_DIRTY_QDATA ||
2592 			count_type == F2FS_DIRTY_IMETA)
2593 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2594 }
2595 
2596 static inline void inode_inc_dirty_pages(struct inode *inode)
2597 {
2598 	atomic_inc(&F2FS_I(inode)->dirty_pages);
2599 	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2600 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2601 	if (IS_NOQUOTA(inode))
2602 		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2603 }
2604 
2605 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2606 {
2607 	atomic_dec(&sbi->nr_pages[count_type]);
2608 }
2609 
2610 static inline void inode_dec_dirty_pages(struct inode *inode)
2611 {
2612 	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2613 			!S_ISLNK(inode->i_mode))
2614 		return;
2615 
2616 	atomic_dec(&F2FS_I(inode)->dirty_pages);
2617 	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2618 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2619 	if (IS_NOQUOTA(inode))
2620 		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2621 }
2622 
2623 static inline void inc_atomic_write_cnt(struct inode *inode)
2624 {
2625 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2626 	struct f2fs_inode_info *fi = F2FS_I(inode);
2627 	u64 current_write;
2628 
2629 	fi->atomic_write_cnt++;
2630 	atomic64_inc(&sbi->current_atomic_write);
2631 	current_write = atomic64_read(&sbi->current_atomic_write);
2632 	if (current_write > sbi->peak_atomic_write)
2633 		sbi->peak_atomic_write = current_write;
2634 }
2635 
2636 static inline void release_atomic_write_cnt(struct inode *inode)
2637 {
2638 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2639 	struct f2fs_inode_info *fi = F2FS_I(inode);
2640 
2641 	atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write);
2642 	fi->atomic_write_cnt = 0;
2643 }
2644 
2645 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2646 {
2647 	return atomic_read(&sbi->nr_pages[count_type]);
2648 }
2649 
2650 static inline int get_dirty_pages(struct inode *inode)
2651 {
2652 	return atomic_read(&F2FS_I(inode)->dirty_pages);
2653 }
2654 
2655 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2656 {
2657 	return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1,
2658 							BLKS_PER_SEC(sbi));
2659 }
2660 
2661 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2662 {
2663 	return sbi->total_valid_block_count;
2664 }
2665 
2666 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2667 {
2668 	return sbi->discard_blks;
2669 }
2670 
2671 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2672 {
2673 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2674 
2675 	/* return NAT or SIT bitmap */
2676 	if (flag == NAT_BITMAP)
2677 		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2678 	else if (flag == SIT_BITMAP)
2679 		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2680 
2681 	return 0;
2682 }
2683 
2684 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2685 {
2686 	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2687 }
2688 
2689 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2690 {
2691 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2692 	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2693 	int offset;
2694 
2695 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2696 		offset = (flag == SIT_BITMAP) ?
2697 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2698 		/*
2699 		 * if large_nat_bitmap feature is enabled, leave room for the
2700 		 * checksum that protects all nat/sit bitmaps.
2701 		 */
2702 		return tmp_ptr + offset + sizeof(__le32);
2703 	}
2704 
2705 	if (__cp_payload(sbi) > 0) {
2706 		if (flag == NAT_BITMAP)
2707 			return tmp_ptr;
2708 		else
2709 			return (unsigned char *)ckpt + F2FS_BLKSIZE;
2710 	} else {
2711 		offset = (flag == NAT_BITMAP) ?
2712 			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2713 		return tmp_ptr + offset;
2714 	}
2715 }
2716 
2717 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2718 {
2719 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2720 
2721 	if (sbi->cur_cp_pack == 2)
2722 		start_addr += BLKS_PER_SEG(sbi);
2723 	return start_addr;
2724 }
2725 
2726 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2727 {
2728 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2729 
2730 	if (sbi->cur_cp_pack == 1)
2731 		start_addr += BLKS_PER_SEG(sbi);
2732 	return start_addr;
2733 }
2734 
2735 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2736 {
2737 	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2738 }
2739 
2740 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2741 {
2742 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2743 }
2744 
2745 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
2746 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2747 					struct inode *inode, bool is_inode)
2748 {
2749 	block_t	valid_block_count;
2750 	unsigned int valid_node_count;
2751 	unsigned int avail_user_block_count;
2752 	int err;
2753 
2754 	if (is_inode) {
2755 		if (inode) {
2756 			err = dquot_alloc_inode(inode);
2757 			if (err)
2758 				return err;
2759 		}
2760 	} else {
2761 		err = dquot_reserve_block(inode, 1);
2762 		if (err)
2763 			return err;
2764 	}
2765 
2766 	if (time_to_inject(sbi, FAULT_BLOCK))
2767 		goto enospc;
2768 
2769 	spin_lock(&sbi->stat_lock);
2770 
2771 	valid_block_count = sbi->total_valid_block_count + 1;
2772 	avail_user_block_count = get_available_block_count(sbi, inode, false);
2773 
2774 	if (unlikely(valid_block_count > avail_user_block_count)) {
2775 		spin_unlock(&sbi->stat_lock);
2776 		goto enospc;
2777 	}
2778 
2779 	valid_node_count = sbi->total_valid_node_count + 1;
2780 	if (unlikely(valid_node_count > sbi->total_node_count)) {
2781 		spin_unlock(&sbi->stat_lock);
2782 		goto enospc;
2783 	}
2784 
2785 	sbi->total_valid_node_count++;
2786 	sbi->total_valid_block_count++;
2787 	spin_unlock(&sbi->stat_lock);
2788 
2789 	if (inode) {
2790 		if (is_inode)
2791 			f2fs_mark_inode_dirty_sync(inode, true);
2792 		else
2793 			f2fs_i_blocks_write(inode, 1, true, true);
2794 	}
2795 
2796 	percpu_counter_inc(&sbi->alloc_valid_block_count);
2797 	return 0;
2798 
2799 enospc:
2800 	if (is_inode) {
2801 		if (inode)
2802 			dquot_free_inode(inode);
2803 	} else {
2804 		dquot_release_reservation_block(inode, 1);
2805 	}
2806 	return -ENOSPC;
2807 }
2808 
2809 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2810 					struct inode *inode, bool is_inode)
2811 {
2812 	spin_lock(&sbi->stat_lock);
2813 
2814 	if (unlikely(!sbi->total_valid_block_count ||
2815 			!sbi->total_valid_node_count)) {
2816 		f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
2817 			  sbi->total_valid_block_count,
2818 			  sbi->total_valid_node_count);
2819 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2820 	} else {
2821 		sbi->total_valid_block_count--;
2822 		sbi->total_valid_node_count--;
2823 	}
2824 
2825 	if (sbi->reserved_blocks &&
2826 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2827 		sbi->current_reserved_blocks++;
2828 
2829 	spin_unlock(&sbi->stat_lock);
2830 
2831 	if (is_inode) {
2832 		dquot_free_inode(inode);
2833 	} else {
2834 		if (unlikely(inode->i_blocks == 0)) {
2835 			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2836 				  inode->i_ino,
2837 				  (unsigned long long)inode->i_blocks);
2838 			set_sbi_flag(sbi, SBI_NEED_FSCK);
2839 			return;
2840 		}
2841 		f2fs_i_blocks_write(inode, 1, false, true);
2842 	}
2843 }
2844 
2845 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2846 {
2847 	return sbi->total_valid_node_count;
2848 }
2849 
2850 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2851 {
2852 	percpu_counter_inc(&sbi->total_valid_inode_count);
2853 }
2854 
2855 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2856 {
2857 	percpu_counter_dec(&sbi->total_valid_inode_count);
2858 }
2859 
2860 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2861 {
2862 	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2863 }
2864 
2865 static inline struct folio *f2fs_grab_cache_folio(struct address_space *mapping,
2866 		pgoff_t index, bool for_write)
2867 {
2868 	struct folio *folio;
2869 	unsigned int flags;
2870 
2871 	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2872 		fgf_t fgf_flags;
2873 
2874 		if (!for_write)
2875 			fgf_flags = FGP_LOCK | FGP_ACCESSED;
2876 		else
2877 			fgf_flags = FGP_LOCK;
2878 		folio = __filemap_get_folio(mapping, index, fgf_flags, 0);
2879 		if (!IS_ERR(folio))
2880 			return folio;
2881 
2882 		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
2883 			return ERR_PTR(-ENOMEM);
2884 	}
2885 
2886 	if (!for_write)
2887 		return filemap_grab_folio(mapping, index);
2888 
2889 	flags = memalloc_nofs_save();
2890 	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2891 			mapping_gfp_mask(mapping));
2892 	memalloc_nofs_restore(flags);
2893 
2894 	return folio;
2895 }
2896 
2897 static inline struct folio *f2fs_filemap_get_folio(
2898 				struct address_space *mapping, pgoff_t index,
2899 				fgf_t fgp_flags, gfp_t gfp_mask)
2900 {
2901 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
2902 		return ERR_PTR(-ENOMEM);
2903 
2904 	return __filemap_get_folio(mapping, index, fgp_flags, gfp_mask);
2905 }
2906 
2907 static inline struct page *f2fs_pagecache_get_page(
2908 				struct address_space *mapping, pgoff_t index,
2909 				fgf_t fgp_flags, gfp_t gfp_mask)
2910 {
2911 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
2912 		return NULL;
2913 
2914 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2915 }
2916 
2917 static inline void f2fs_folio_put(struct folio *folio, bool unlock)
2918 {
2919 	if (IS_ERR_OR_NULL(folio))
2920 		return;
2921 
2922 	if (unlock) {
2923 		f2fs_bug_on(F2FS_F_SB(folio), !folio_test_locked(folio));
2924 		folio_unlock(folio);
2925 	}
2926 	folio_put(folio);
2927 }
2928 
2929 static inline void f2fs_put_page(struct page *page, int unlock)
2930 {
2931 	if (!page)
2932 		return;
2933 	f2fs_folio_put(page_folio(page), unlock);
2934 }
2935 
2936 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2937 {
2938 	if (dn->node_folio)
2939 		f2fs_folio_put(dn->node_folio, true);
2940 	if (dn->inode_folio && dn->node_folio != dn->inode_folio)
2941 		f2fs_folio_put(dn->inode_folio, false);
2942 	dn->node_folio = NULL;
2943 	dn->inode_folio = NULL;
2944 }
2945 
2946 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2947 					size_t size)
2948 {
2949 	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2950 }
2951 
2952 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
2953 						gfp_t flags)
2954 {
2955 	void *entry;
2956 
2957 	entry = kmem_cache_alloc(cachep, flags);
2958 	if (!entry)
2959 		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2960 	return entry;
2961 }
2962 
2963 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2964 			gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
2965 {
2966 	if (nofail)
2967 		return f2fs_kmem_cache_alloc_nofail(cachep, flags);
2968 
2969 	if (time_to_inject(sbi, FAULT_SLAB_ALLOC))
2970 		return NULL;
2971 
2972 	return kmem_cache_alloc(cachep, flags);
2973 }
2974 
2975 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2976 {
2977 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2978 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2979 		get_pages(sbi, F2FS_WB_CP_DATA) ||
2980 		get_pages(sbi, F2FS_DIO_READ) ||
2981 		get_pages(sbi, F2FS_DIO_WRITE))
2982 		return true;
2983 
2984 	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2985 			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2986 		return true;
2987 
2988 	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2989 			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2990 		return true;
2991 	return false;
2992 }
2993 
2994 static inline bool is_inflight_read_io(struct f2fs_sb_info *sbi)
2995 {
2996 	return get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_DIO_READ);
2997 }
2998 
2999 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
3000 {
3001 	bool zoned_gc = (type == GC_TIME &&
3002 			F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_BLKZONED));
3003 
3004 	if (sbi->gc_mode == GC_URGENT_HIGH)
3005 		return true;
3006 
3007 	if (zoned_gc) {
3008 		if (is_inflight_read_io(sbi))
3009 			return false;
3010 	} else {
3011 		if (is_inflight_io(sbi, type))
3012 			return false;
3013 	}
3014 
3015 	if (sbi->gc_mode == GC_URGENT_MID)
3016 		return true;
3017 
3018 	if (sbi->gc_mode == GC_URGENT_LOW &&
3019 			(type == DISCARD_TIME || type == GC_TIME))
3020 		return true;
3021 
3022 	if (zoned_gc)
3023 		return true;
3024 
3025 	return f2fs_time_over(sbi, type);
3026 }
3027 
3028 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
3029 				unsigned long index, void *item)
3030 {
3031 	while (radix_tree_insert(root, index, item))
3032 		cond_resched();
3033 }
3034 
3035 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
3036 
3037 static inline bool IS_INODE(const struct folio *folio)
3038 {
3039 	struct f2fs_node *p = F2FS_NODE(folio);
3040 
3041 	return RAW_IS_INODE(p);
3042 }
3043 
3044 static inline int offset_in_addr(struct f2fs_inode *i)
3045 {
3046 	return (i->i_inline & F2FS_EXTRA_ATTR) ?
3047 			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
3048 }
3049 
3050 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
3051 {
3052 	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
3053 }
3054 
3055 static inline int f2fs_has_extra_attr(struct inode *inode);
3056 static inline unsigned int get_dnode_base(struct inode *inode,
3057 					struct folio *node_folio)
3058 {
3059 	if (!IS_INODE(node_folio))
3060 		return 0;
3061 
3062 	return inode ? get_extra_isize(inode) :
3063 			offset_in_addr(&F2FS_NODE(node_folio)->i);
3064 }
3065 
3066 static inline __le32 *get_dnode_addr(struct inode *inode,
3067 					struct folio *node_folio)
3068 {
3069 	return blkaddr_in_node(F2FS_NODE(node_folio)) +
3070 			get_dnode_base(inode, node_folio);
3071 }
3072 
3073 static inline block_t data_blkaddr(struct inode *inode,
3074 			struct folio *node_folio, unsigned int offset)
3075 {
3076 	return le32_to_cpu(*(get_dnode_addr(inode, node_folio) + offset));
3077 }
3078 
3079 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
3080 {
3081 	return data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node);
3082 }
3083 
3084 static inline int f2fs_test_bit(unsigned int nr, char *addr)
3085 {
3086 	int mask;
3087 
3088 	addr += (nr >> 3);
3089 	mask = BIT(7 - (nr & 0x07));
3090 	return mask & *addr;
3091 }
3092 
3093 static inline void f2fs_set_bit(unsigned int nr, char *addr)
3094 {
3095 	int mask;
3096 
3097 	addr += (nr >> 3);
3098 	mask = BIT(7 - (nr & 0x07));
3099 	*addr |= mask;
3100 }
3101 
3102 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
3103 {
3104 	int mask;
3105 
3106 	addr += (nr >> 3);
3107 	mask = BIT(7 - (nr & 0x07));
3108 	*addr &= ~mask;
3109 }
3110 
3111 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
3112 {
3113 	int mask;
3114 	int ret;
3115 
3116 	addr += (nr >> 3);
3117 	mask = BIT(7 - (nr & 0x07));
3118 	ret = mask & *addr;
3119 	*addr |= mask;
3120 	return ret;
3121 }
3122 
3123 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
3124 {
3125 	int mask;
3126 	int ret;
3127 
3128 	addr += (nr >> 3);
3129 	mask = BIT(7 - (nr & 0x07));
3130 	ret = mask & *addr;
3131 	*addr &= ~mask;
3132 	return ret;
3133 }
3134 
3135 static inline void f2fs_change_bit(unsigned int nr, char *addr)
3136 {
3137 	int mask;
3138 
3139 	addr += (nr >> 3);
3140 	mask = BIT(7 - (nr & 0x07));
3141 	*addr ^= mask;
3142 }
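
/*
 * Worked example: these helpers address bits MSB-first within each byte,
 * matching the on-disk bitmap layout. For nr = 10, addr advances by
 * 10 >> 3 = 1 byte and mask = BIT(7 - (10 & 7)) = BIT(5) = 0x20, i.e.
 * bit 10 is the third-highest bit of the second byte.
 */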
3143 
3144 /*
3145  * On-disk inode flags (f2fs_inode::i_flags)
3146  */
3147 #define F2FS_COMPR_FL			0x00000004 /* Compress file */
3148 #define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
3149 #define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
3150 #define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
3151 #define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
3152 #define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
3153 #define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
3154 #define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
3155 #define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
3156 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
3157 #define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */
3158 #define F2FS_DEVICE_ALIAS_FL		0x80000000 /* File for aliasing a device */
3159 
3160 #define F2FS_QUOTA_DEFAULT_FL		(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL)
3161 
3162 /* Flags that should be inherited by new inodes from their parent. */
3163 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
3164 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
3165 			   F2FS_CASEFOLD_FL)
3166 
3167 /* Flags that are appropriate for regular files (all but dir-specific ones). */
3168 #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
3169 				F2FS_CASEFOLD_FL))
3170 
3171 /* Flags that are appropriate for inodes that are neither directories nor regular files. */
3172 #define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
3173 
3174 #define IS_DEVICE_ALIASING(inode)	(F2FS_I(inode)->i_flags & F2FS_DEVICE_ALIAS_FL)
3175 
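/*
 * Directories keep all flags; regular and special files drop the bits
 * that do not apply to them. Illustrative example of flag inheritance at
 * create time (values chosen for the sketch):
 *
 *	__u32 parent = F2FS_DIRSYNC_FL | F2FS_NOATIME_FL;
 *	__u32 child = f2fs_mask_flags(S_IFREG, parent & F2FS_FL_INHERITED);
 *	// child == F2FS_NOATIME_FL: DIRSYNC is directory-only
 */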
3176 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
3177 {
3178 	if (S_ISDIR(mode))
3179 		return flags;
3180 	else if (S_ISREG(mode))
3181 		return flags & F2FS_REG_FLMASK;
3182 	else
3183 		return flags & F2FS_OTHER_FLMASK;
3184 }
3185 
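/*
 * Only flags that are mirrored in the on-disk inode need a sync: clearing
 * any of the cases below dirties the inode, and FI_DATA_EXIST,
 * FI_PIN_FILE and FI_COMPRESS_RELEASED dirty it on set as well; all
 * other flags leave the dirty state alone.
 */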
3186 static inline void __mark_inode_dirty_flag(struct inode *inode,
3187 						int flag, bool set)
3188 {
3189 	switch (flag) {
3190 	case FI_INLINE_XATTR:
3191 	case FI_INLINE_DATA:
3192 	case FI_INLINE_DENTRY:
3193 	case FI_NEW_INODE:
3194 		if (set)
3195 			return;
3196 		fallthrough;
3197 	case FI_DATA_EXIST:
3198 	case FI_PIN_FILE:
3199 	case FI_COMPRESS_RELEASED:
3200 		f2fs_mark_inode_dirty_sync(inode, true);
3201 	}
3202 }
3203 
3204 static inline void set_inode_flag(struct inode *inode, int flag)
3205 {
3206 	set_bit(flag, F2FS_I(inode)->flags);
3207 	__mark_inode_dirty_flag(inode, flag, true);
3208 }
3209 
3210 static inline int is_inode_flag_set(struct inode *inode, int flag)
3211 {
3212 	return test_bit(flag, F2FS_I(inode)->flags);
3213 }
3214 
3215 static inline void clear_inode_flag(struct inode *inode, int flag)
3216 {
3217 	clear_bit(flag, F2FS_I(inode)->flags);
3218 	__mark_inode_dirty_flag(inode, flag, false);
3219 }
3220 
3221 static inline bool f2fs_verity_in_progress(struct inode *inode)
3222 {
3223 	return IS_ENABLED(CONFIG_FS_VERITY) &&
3224 	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
3225 }
3226 
3227 static inline void set_acl_inode(struct inode *inode, umode_t mode)
3228 {
3229 	F2FS_I(inode)->i_acl_mode = mode;
3230 	set_inode_flag(inode, FI_ACL_MODE);
3231 	f2fs_mark_inode_dirty_sync(inode, false);
3232 }
3233 
3234 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
3235 {
3236 	if (inc)
3237 		inc_nlink(inode);
3238 	else
3239 		drop_nlink(inode);
3240 	f2fs_mark_inode_dirty_sync(inode, true);
3241 }
3242 
3243 static inline void f2fs_i_blocks_write(struct inode *inode,
3244 					block_t diff, bool add, bool claim)
3245 {
3246 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3247 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3248 
3249 	/* add && claim must be paired with a prior dquot_reserve_block() */
3250 	if (add) {
3251 		if (claim)
3252 			dquot_claim_block(inode, diff);
3253 		else
3254 			dquot_alloc_block_nofail(inode, diff);
3255 	} else {
3256 		dquot_free_block(inode, diff);
3257 	}
3258 
3259 	f2fs_mark_inode_dirty_sync(inode, true);
3260 	if (clean || recover)
3261 		set_inode_flag(inode, FI_AUTO_RECOVER);
3262 }
3263 
3264 static inline bool f2fs_is_atomic_file(struct inode *inode);
3265 
3266 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
3267 {
3268 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3269 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3270 
3271 	if (i_size_read(inode) == i_size)
3272 		return;
3273 
3274 	i_size_write(inode, i_size);
3275 
3276 	if (f2fs_is_atomic_file(inode))
3277 		return;
3278 
3279 	f2fs_mark_inode_dirty_sync(inode, true);
3280 	if (clean || recover)
3281 		set_inode_flag(inode, FI_AUTO_RECOVER);
3282 }
3283 
3284 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
3285 {
3286 	F2FS_I(inode)->i_current_depth = depth;
3287 	f2fs_mark_inode_dirty_sync(inode, true);
3288 }
3289 
3290 static inline void f2fs_i_gc_failures_write(struct inode *inode,
3291 					unsigned int count)
3292 {
3293 	F2FS_I(inode)->i_gc_failures = count;
3294 	f2fs_mark_inode_dirty_sync(inode, true);
3295 }
3296 
3297 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
3298 {
3299 	F2FS_I(inode)->i_xattr_nid = xnid;
3300 	f2fs_mark_inode_dirty_sync(inode, true);
3301 }
3302 
3303 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
3304 {
3305 	F2FS_I(inode)->i_pino = pino;
3306 	f2fs_mark_inode_dirty_sync(inode, true);
3307 }
3308 
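/*
 * get_inline_info() and set_raw_inline() are inverses: the former loads
 * the on-disk i_inline bits into the in-core fi->flags when an inode is
 * read, the latter serializes the same flags back into i_inline when the
 * inode is written out.
 */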
3309 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
3310 {
3311 	struct f2fs_inode_info *fi = F2FS_I(inode);
3312 
3313 	if (ri->i_inline & F2FS_INLINE_XATTR)
3314 		set_bit(FI_INLINE_XATTR, fi->flags);
3315 	if (ri->i_inline & F2FS_INLINE_DATA)
3316 		set_bit(FI_INLINE_DATA, fi->flags);
3317 	if (ri->i_inline & F2FS_INLINE_DENTRY)
3318 		set_bit(FI_INLINE_DENTRY, fi->flags);
3319 	if (ri->i_inline & F2FS_DATA_EXIST)
3320 		set_bit(FI_DATA_EXIST, fi->flags);
3321 	if (ri->i_inline & F2FS_EXTRA_ATTR)
3322 		set_bit(FI_EXTRA_ATTR, fi->flags);
3323 	if (ri->i_inline & F2FS_PIN_FILE)
3324 		set_bit(FI_PIN_FILE, fi->flags);
3325 	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
3326 		set_bit(FI_COMPRESS_RELEASED, fi->flags);
3327 }
3328 
3329 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
3330 {
3331 	ri->i_inline = 0;
3332 
3333 	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
3334 		ri->i_inline |= F2FS_INLINE_XATTR;
3335 	if (is_inode_flag_set(inode, FI_INLINE_DATA))
3336 		ri->i_inline |= F2FS_INLINE_DATA;
3337 	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
3338 		ri->i_inline |= F2FS_INLINE_DENTRY;
3339 	if (is_inode_flag_set(inode, FI_DATA_EXIST))
3340 		ri->i_inline |= F2FS_DATA_EXIST;
3341 	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
3342 		ri->i_inline |= F2FS_EXTRA_ATTR;
3343 	if (is_inode_flag_set(inode, FI_PIN_FILE))
3344 		ri->i_inline |= F2FS_PIN_FILE;
3345 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
3346 		ri->i_inline |= F2FS_COMPRESS_RELEASED;
3347 }
3348 
3349 static inline int f2fs_has_extra_attr(struct inode *inode)
3350 {
3351 	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
3352 }
3353 
3354 static inline int f2fs_has_inline_xattr(struct inode *inode)
3355 {
3356 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
3357 }
3358 
3359 static inline int f2fs_compressed_file(struct inode *inode)
3360 {
3361 	return S_ISREG(inode->i_mode) &&
3362 		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
3363 }
3364 
3365 static inline bool f2fs_need_compress_data(struct inode *inode)
3366 {
3367 	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;
3368 
3369 	if (!f2fs_compressed_file(inode))
3370 		return false;
3371 
3372 	if (compress_mode == COMPR_MODE_FS)
3373 		return true;
3374 	else if (compress_mode == COMPR_MODE_USER &&
3375 			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
3376 		return true;
3377 
3378 	return false;
3379 }
3380 
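/*
 * For compressed inodes the usable slot count is rounded down to a whole
 * number of clusters so that no cluster straddles two node blocks. For
 * example (a sketch assuming the default 923 i_addr slots and
 * i_cluster_size == 4), ALIGN_DOWN(923, 4) leaves 920 usable addresses.
 */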
3381 static inline unsigned int addrs_per_page(struct inode *inode,
3382 							bool is_inode)
3383 {
3384 	unsigned int addrs = is_inode ? (CUR_ADDRS_PER_INODE(inode) -
3385 			get_inline_xattr_addrs(inode)) : DEF_ADDRS_PER_BLOCK;
3386 
3387 	if (f2fs_compressed_file(inode))
3388 		return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
3389 	return addrs;
3390 }
3391 
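/*
 * Inline xattrs occupy the tail of the inode's i_addr[] area, while
 * inline data starts at the front after DEF_INLINE_RESERVED_SIZE
 * reserved slot(s). Layout sketch (slot counts vary with extra
 * attributes and the configured inline xattr size):
 *
 *	i_addr[]: [ reserved | inline data ........ | inline xattrs ]
 */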
3392 static inline
3393 void *inline_xattr_addr(struct inode *inode, const struct folio *folio)
3394 {
3395 	struct f2fs_inode *ri = F2FS_INODE(folio);
3396 
3397 	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
3398 					get_inline_xattr_addrs(inode)]);
3399 }
3400 
3401 static inline int inline_xattr_size(struct inode *inode)
3402 {
3403 	if (f2fs_has_inline_xattr(inode))
3404 		return get_inline_xattr_addrs(inode) * sizeof(__le32);
3405 	return 0;
3406 }
3407 
3408 /*
3409  * Note: checking the inline_data flag without holding the inode page lock
3410  * is unsafe; f2fs_convert_inline_folio() can clear it at any time.
3411  */
3412 static inline int f2fs_has_inline_data(struct inode *inode)
3413 {
3414 	return is_inode_flag_set(inode, FI_INLINE_DATA);
3415 }
3416 
3417 static inline int f2fs_exist_data(struct inode *inode)
3418 {
3419 	return is_inode_flag_set(inode, FI_DATA_EXIST);
3420 }
3421 
3422 static inline int f2fs_is_mmap_file(struct inode *inode)
3423 {
3424 	return is_inode_flag_set(inode, FI_MMAP_FILE);
3425 }
3426 
3427 static inline bool f2fs_is_pinned_file(struct inode *inode)
3428 {
3429 	return is_inode_flag_set(inode, FI_PIN_FILE);
3430 }
3431 
3432 static inline bool f2fs_is_atomic_file(struct inode *inode)
3433 {
3434 	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
3435 }
3436 
3437 static inline bool f2fs_is_cow_file(struct inode *inode)
3438 {
3439 	return is_inode_flag_set(inode, FI_COW_FILE);
3440 }
3441 
3442 static inline void *inline_data_addr(struct inode *inode, struct folio *folio)
3443 {
3444 	__le32 *addr = get_dnode_addr(inode, folio);
3445 
3446 	return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
3447 }
3448 
3449 static inline int f2fs_has_inline_dentry(struct inode *inode)
3450 {
3451 	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
3452 }
3453 
3454 static inline int is_file(struct inode *inode, int type)
3455 {
3456 	return F2FS_I(inode)->i_advise & type;
3457 }
3458 
3459 static inline void set_file(struct inode *inode, int type)
3460 {
3461 	if (is_file(inode, type))
3462 		return;
3463 	F2FS_I(inode)->i_advise |= type;
3464 	f2fs_mark_inode_dirty_sync(inode, true);
3465 }
3466 
3467 static inline void clear_file(struct inode *inode, int type)
3468 {
3469 	if (!is_file(inode, type))
3470 		return;
3471 	F2FS_I(inode)->i_advise &= ~type;
3472 	f2fs_mark_inode_dirty_sync(inode, true);
3473 }
3474 
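/*
 * i_disk_time[] caches the timestamps most recently written to disk, in
 * the order atime [0], ctime [1], mtime [2]; the times are "consistent"
 * when none of them has drifted from the VFS inode since the last
 * update.
 */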
3475 static inline bool f2fs_is_time_consistent(struct inode *inode)
3476 {
3477 	struct timespec64 ts = inode_get_atime(inode);
3478 
3479 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts))
3480 		return false;
3481 	ts = inode_get_ctime(inode);
3482 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts))
3483 		return false;
3484 	ts = inode_get_mtime(inode);
3485 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts))
3486 		return false;
3487 	return true;
3488 }
3489 
3490 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
3491 {
3492 	bool ret;
3493 
3494 	if (dsync) {
3495 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3496 
3497 		spin_lock(&sbi->inode_lock[DIRTY_META]);
3498 		ret = list_empty(&F2FS_I(inode)->gdirty_list);
3499 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
3500 		return ret;
3501 	}
3502 	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
3503 			file_keep_isize(inode) ||
3504 			i_size_read(inode) & ~PAGE_MASK)
3505 		return false;
3506 
3507 	if (!f2fs_is_time_consistent(inode))
3508 		return false;
3509 
3510 	spin_lock(&F2FS_I(inode)->i_size_lock);
3511 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3512 	spin_unlock(&F2FS_I(inode)->i_size_lock);
3513 
3514 	return ret;
3515 }
3516 
3517 static inline bool f2fs_readonly(struct super_block *sb)
3518 {
3519 	return sb_rdonly(sb);
3520 }
3521 
3522 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
3523 {
3524 	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
3525 }
3526 
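/*
 * Thin wrappers around the mm allocators: with CONFIG_F2FS_FAULT_INJECTION,
 * time_to_inject() may force a NULL return, so callers must handle failure
 * even for small allocations. Minimal usage sketch (struct foo is
 * hypothetical):
 *
 *	struct foo *p = f2fs_kzalloc(sbi, sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 */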
3527 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
3528 					size_t size, gfp_t flags)
3529 {
3530 	if (time_to_inject(sbi, FAULT_KMALLOC))
3531 		return NULL;
3532 
3533 	return kmalloc(size, flags);
3534 }
3535 
3536 static inline void *f2fs_getname(struct f2fs_sb_info *sbi)
3537 {
3538 	if (time_to_inject(sbi, FAULT_KMALLOC))
3539 		return NULL;
3540 
3541 	return __getname();
3542 }
3543 
3544 static inline void f2fs_putname(char *buf)
3545 {
3546 	__putname(buf);
3547 }
3548 
3549 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3550 					size_t size, gfp_t flags)
3551 {
3552 	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3553 }
3554 
3555 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3556 					size_t size, gfp_t flags)
3557 {
3558 	if (time_to_inject(sbi, FAULT_KVMALLOC))
3559 		return NULL;
3560 
3561 	return kvmalloc(size, flags);
3562 }
3563 
3564 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3565 					size_t size, gfp_t flags)
3566 {
3567 	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3568 }
3569 
3570 static inline void *f2fs_vmalloc(struct f2fs_sb_info *sbi, size_t size)
3571 {
3572 	if (time_to_inject(sbi, FAULT_VMALLOC))
3573 		return NULL;
3574 
3575 	return vmalloc(size);
3576 }
3577 
3578 static inline int get_extra_isize(struct inode *inode)
3579 {
3580 	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3581 }
3582 
3583 static inline int get_inline_xattr_addrs(struct inode *inode)
3584 {
3585 	return F2FS_I(inode)->i_inline_xattr_size;
3586 }
3587 
3588 #define f2fs_get_inode_mode(i) \
3589 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3590 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3591 
3592 #define F2FS_MIN_EXTRA_ATTR_SIZE		(sizeof(__le32))
3593 
3594 #define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
3595 	(offsetof(struct f2fs_inode, i_extra_end) -	\
3596 	offsetof(struct f2fs_inode, i_extra_isize))	\
3597 
3598 #define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
3599 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
3600 		((offsetof(typeof(*(f2fs_inode)), field) +	\
3601 		sizeof((f2fs_inode)->field))			\
3602 		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\
3603 
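/*
 * F2FS_FITS_IN_INODE() tells whether @field ends inside the part of the
 * extra attribute area that @extra_isize actually covers. Sketch of a
 * typical check (mirroring callers elsewhere in f2fs):
 *
 *	if (F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize), i_projid))
 *		...	// i_projid is valid in this on-disk inode
 */
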
3604 #define __is_large_section(sbi)		(SEGS_PER_SEC(sbi) > 1)
3605 
3606 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3607 
3608 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3609 					block_t blkaddr, int type);
3610 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3611 					block_t blkaddr, int type)
3612 {
3613 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
3614 		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3615 			 blkaddr, type);
3616 }
3617 
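/*
 * NULL_ADDR, NEW_ADDR and COMPRESS_ADDR are in-core sentinels (a hole, a
 * reserved-but-unwritten block and a compressed-cluster marker,
 * respectively), so none of them refers to a real on-disk block.
 */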
3618 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3619 {
3620 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3621 			blkaddr == COMPRESS_ADDR)
3622 		return false;
3623 	return true;
3624 }
3625 
3626 /*
3627  * file.c
3628  */
3629 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3630 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3631 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3632 int f2fs_truncate(struct inode *inode);
3633 int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
3634 		 struct kstat *stat, u32 request_mask, unsigned int flags);
3635 int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
3636 		 struct iattr *attr);
3637 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3638 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3639 int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
3640 						bool readonly, bool need_lock);
3641 int f2fs_precache_extents(struct inode *inode);
3642 int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
3643 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3644 		      struct dentry *dentry, struct file_kattr *fa);
3645 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3646 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3647 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3648 int f2fs_pin_file_control(struct inode *inode, bool inc);
3649 
3650 /*
3651  * inode.c
3652  */
3653 void f2fs_set_inode_flags(struct inode *inode);
3654 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio);
3655 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct folio *folio);
3656 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3657 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3659 void f2fs_update_inode(struct inode *inode, struct folio *node_folio);
3660 void f2fs_update_inode_page(struct inode *inode);
3661 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3662 void f2fs_remove_donate_inode(struct inode *inode);
3663 void f2fs_evict_inode(struct inode *inode);
3664 void f2fs_handle_failed_inode(struct inode *inode);
3665 
3666 /*
3667  * namei.c
3668  */
3669 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3670 							bool hot, bool set);
3671 struct dentry *f2fs_get_parent(struct dentry *child);
3672 int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3673 		     struct inode **new_inode);
3674 
3675 /*
3676  * dir.c
3677  */
3678 #if IS_ENABLED(CONFIG_UNICODE)
3679 int f2fs_init_casefolded_name(const struct inode *dir,
3680 			      struct f2fs_filename *fname);
3681 void f2fs_free_casefolded_name(struct f2fs_filename *fname);
3682 #else
3683 static inline int f2fs_init_casefolded_name(const struct inode *dir,
3684 					    struct f2fs_filename *fname)
3685 {
3686 	return 0;
3687 }
3688 
3689 static inline void f2fs_free_casefolded_name(struct f2fs_filename *fname)
3690 {
3691 }
3692 #endif /* CONFIG_UNICODE */
3693 
3694 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3695 			int lookup, struct f2fs_filename *fname);
3696 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3697 			struct f2fs_filename *fname);
3698 void f2fs_free_filename(struct f2fs_filename *fname);
3699 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3700 			const struct f2fs_filename *fname, int *max_slots,
3701 			bool use_hash);
3702 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3703 			unsigned int start_pos, struct fscrypt_str *fstr);
3704 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3705 			struct f2fs_dentry_ptr *d);
3706 struct folio *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3707 		const struct f2fs_filename *fname, struct folio *dfolio);
3708 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3709 			unsigned int current_depth);
3710 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3711 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3712 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3713 		const struct f2fs_filename *fname, struct folio **res_folio);
3714 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3715 			const struct qstr *child, struct folio **res_folio);
3716 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct folio **f);
3717 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3718 			struct folio **folio);
3719 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3720 			struct folio *folio, struct inode *inode);
3721 bool f2fs_has_enough_room(struct inode *dir, struct folio *ifolio,
3722 			  const struct f2fs_filename *fname);
3723 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3724 			const struct fscrypt_str *name, f2fs_hash_t name_hash,
3725 			unsigned int bit_pos);
3726 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3727 			struct inode *inode, nid_t ino, umode_t mode);
3728 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3729 			struct inode *inode, nid_t ino, umode_t mode);
3730 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3731 			struct inode *inode, nid_t ino, umode_t mode);
3732 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct folio *folio,
3733 			struct inode *dir, struct inode *inode);
3734 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
3735 					struct f2fs_filename *fname);
3736 bool f2fs_empty_dir(struct inode *dir);
3737 
3738 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3739 {
3740 	if (fscrypt_is_nokey_name(dentry))
3741 		return -ENOKEY;
3742 	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3743 				inode, inode->i_ino, inode->i_mode);
3744 }
3745 
3746 /*
3747  * super.c
3748  */
3749 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3750 void f2fs_inode_synced(struct inode *inode);
3751 int f2fs_dquot_initialize(struct inode *inode);
3752 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3753 int f2fs_do_quota_sync(struct super_block *sb, int type);
3754 loff_t max_file_blocks(struct inode *inode);
3755 void f2fs_quota_off_umount(struct super_block *sb);
3756 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
3757 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason);
3758 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
3759 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error);
3760 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3761 int f2fs_sync_fs(struct super_block *sb, int sync);
3762 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
3763 
3764 /*
3765  * hash.c
3766  */
3767 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
3768 
3769 /*
3770  * node.c
3771  */
3772 struct node_info;
3773 
3774 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3775 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3776 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio);
3777 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3778 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio);
3779 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3780 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3781 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3782 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3783 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3784 				struct node_info *ni, bool checkpoint_context);
3785 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3786 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3787 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3788 int f2fs_truncate_xattr_node(struct inode *inode);
3789 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3790 					unsigned int seq_id);
3791 int f2fs_remove_inode_page(struct inode *inode);
3792 struct folio *f2fs_new_inode_folio(struct inode *inode);
3793 struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs);
3794 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3795 struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid);
3796 struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino);
3797 struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid);
3798 int f2fs_move_node_folio(struct folio *node_folio, int gc_type);
3799 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3800 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3801 			struct writeback_control *wbc, bool atomic,
3802 			unsigned int *seq_id);
3803 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3804 			struct writeback_control *wbc,
3805 			bool do_balance, enum iostat_type io_type);
3806 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3807 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3808 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3809 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3810 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3811 int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio);
3812 int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio);
3813 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio);
3814 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3815 			unsigned int segno, struct f2fs_summary_block *sum);
3816 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3817 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3818 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3819 int __init f2fs_create_node_manager_caches(void);
3820 void f2fs_destroy_node_manager_caches(void);
3821 
3822 /*
3823  * segment.c
3824  */
3825 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3826 int f2fs_commit_atomic_write(struct inode *inode);
3827 void f2fs_abort_atomic_write(struct inode *inode, bool clean);
3828 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3829 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3830 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3831 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3832 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3833 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3834 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
3835 						unsigned int len);
3836 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3837 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
3838 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3839 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3840 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3841 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3842 					struct cp_control *cpc);
3843 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3844 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3845 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3846 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3847 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3848 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
3849 int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
3850 int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi);
3851 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
3852 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
3853 int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3854 					unsigned int start, unsigned int end);
3855 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
3856 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi);
3857 int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3858 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3859 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3860 					struct cp_control *cpc);
3861 struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno);
3862 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3863 					block_t blk_addr);
3864 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
3865 						enum iostat_type io_type);
3866 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3867 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3868 			struct f2fs_io_info *fio);
3869 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3870 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3871 			block_t old_blkaddr, block_t new_blkaddr,
3872 			bool recover_curseg, bool recover_newaddr,
3873 			bool from_gc);
3874 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3875 			block_t old_addr, block_t new_addr,
3876 			unsigned char version, bool recover_curseg,
3877 			bool recover_newaddr);
3878 enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi,
3879 						enum log_type seg_type);
3880 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct folio *folio,
3881 			block_t old_blkaddr, block_t *new_blkaddr,
3882 			struct f2fs_summary *sum, int type,
3883 			struct f2fs_io_info *fio);
3884 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3885 					block_t blkaddr, unsigned int blkcnt);
3886 void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
3887 		bool ordered, bool locked);
3888 #define f2fs_wait_on_page_writeback(page, type, ordered, locked)	\
3889 		f2fs_folio_wait_writeback(page_folio(page), type, ordered, locked)
3890 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3891 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3892 								block_t len);
3893 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3894 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3895 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3896 			unsigned int val, int alloc);
3897 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3898 int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi);
3899 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3900 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3901 int __init f2fs_create_segment_manager_caches(void);
3902 void f2fs_destroy_segment_manager_caches(void);
3903 int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint);
3904 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3905 			enum page_type type, enum temp_type temp);
3906 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi);
3907 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
3908 			unsigned int segno);
3909 unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
3910 			unsigned int segno);
3911 
3912 static inline struct inode *fio_inode(struct f2fs_io_info *fio)
3913 {
3914 	return fio->folio->mapping->host;
3915 }
3916 
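/*
 * mode=fragment:segment and mode=fragment:block are debugging modes that
 * deliberately scatter allocations to simulate an aged, fragmented
 * volume; the bounds below constrain the randomized chunk/hole sizes
 * used by fragment:block mode.
 */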
3917 #define DEF_FRAGMENT_SIZE	4
3918 #define MIN_FRAGMENT_SIZE	1
3919 #define MAX_FRAGMENT_SIZE	512
3920 
3921 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
3922 {
3923 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
3924 		F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
3925 }
3926 
3927 /*
3928  * checkpoint.c
3929  */
3930 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
3931 							unsigned char reason);
3932 void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
3933 struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index);
3934 struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index);
3935 struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index);
3936 struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index);
3937 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3938 					block_t blkaddr, int type);
3939 bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
3940 					block_t blkaddr, int type);
3941 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3942 			int type, bool sync);
3943 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
3944 							unsigned int ra_blocks);
3945 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3946 			long nr_to_write, enum iostat_type io_type);
3947 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3948 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3949 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3950 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3951 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3952 					unsigned int devidx, int type);
3953 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3954 					unsigned int devidx, int type);
3955 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3956 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3957 void f2fs_add_orphan_inode(struct inode *inode);
3958 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3959 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3960 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3961 void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
3962 void f2fs_remove_dirty_inode(struct inode *inode);
3963 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
3964 								bool from_cp);
3965 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3966 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
3967 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3968 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3969 int __init f2fs_create_checkpoint_caches(void);
3970 void f2fs_destroy_checkpoint_caches(void);
3971 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
3972 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
3973 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
3974 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
3975 
3976 /*
3977  * data.c
3978  */
3979 int __init f2fs_init_bioset(void);
3980 void f2fs_destroy_bioset(void);
3981 bool f2fs_is_cp_guaranteed(const struct folio *folio);
3982 int f2fs_init_bio_entry_cache(void);
3983 void f2fs_destroy_bio_entry_cache(void);
3984 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
3985 			  enum page_type type);
3986 int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
3987 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3988 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3989 				struct inode *inode, struct folio *folio,
3990 				nid_t ino, enum page_type type);
3991 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3992 					struct bio **bio, struct folio *folio);
3993 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3994 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3995 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3996 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3997 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3998 		block_t blk_addr, sector_t *sector);
3999 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
4000 void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
4001 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
4002 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
4003 int f2fs_reserve_new_block(struct dnode_of_data *dn);
4004 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
4005 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
4006 struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
4007 		blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
4008 struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
4009 		pgoff_t *next_pgofs);
4010 struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
4011 			bool for_write);
4012 struct folio *f2fs_get_new_data_folio(struct inode *inode,
4013 			struct folio *ifolio, pgoff_t index, bool new_i_size);
4014 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
4015 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
4016 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4017 			u64 start, u64 len);
4018 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
4019 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
4020 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
4021 int f2fs_write_single_data_page(struct folio *folio, int *submitted,
4022 				struct bio **bio, sector_t *last_block,
4023 				struct writeback_control *wbc,
4024 				enum iostat_type io_type,
4025 				int compr_blocks, bool allow_balance);
4026 void f2fs_write_failed(struct inode *inode, loff_t to);
4027 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
4028 bool f2fs_release_folio(struct folio *folio, gfp_t wait);
4029 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
4030 void f2fs_clear_page_cache_dirty_tag(struct folio *folio);
4031 int f2fs_init_post_read_processing(void);
4032 void f2fs_destroy_post_read_processing(void);
4033 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
4034 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
4035 extern const struct iomap_ops f2fs_iomap_ops;
4036 
4037 /*
4038  * gc.c
4039  */
4040 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
4041 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
4042 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
4043 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
4044 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
4045 int f2fs_gc_range(struct f2fs_sb_info *sbi,
4046 		unsigned int start_seg, unsigned int end_seg,
4047 		bool dry_run, unsigned int dry_run_sections);
4048 int f2fs_resize_fs(struct file *filp, __u64 block_count);
4049 int __init f2fs_create_garbage_collection_cache(void);
4050 void f2fs_destroy_garbage_collection_cache(void);
4051 /* victim selection function for cleaning and SSR */
4052 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
4053 			int gc_type, int type, char alloc_mode,
4054 			unsigned long long age, bool one_time);
4055 
4056 /*
4057  * recovery.c
4058  */
4059 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
4060 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
4061 int __init f2fs_create_recovery_cache(void);
4062 void f2fs_destroy_recovery_cache(void);
4063 
4064 /*
4065  * debug.c
4066  */
4067 #ifdef CONFIG_F2FS_STAT_FS
4068 enum {
4069 	DEVSTAT_INUSE,
4070 	DEVSTAT_DIRTY,
4071 	DEVSTAT_FULL,
4072 	DEVSTAT_FREE,
4073 	DEVSTAT_PREFREE,
4074 	DEVSTAT_MAX,
4075 };
4076 
4077 struct f2fs_dev_stats {
4078 	unsigned int devstats[2][DEVSTAT_MAX];		/* 0: segs, 1: secs */
4079 };
4080 
4081 struct f2fs_stat_info {
4082 	struct list_head stat_list;
4083 	struct f2fs_sb_info *sbi;
4084 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
4085 	int main_area_segs, main_area_sections, main_area_zones;
4086 	unsigned long long hit_cached[NR_EXTENT_CACHES];
4087 	unsigned long long hit_rbtree[NR_EXTENT_CACHES];
4088 	unsigned long long total_ext[NR_EXTENT_CACHES];
4089 	unsigned long long hit_total[NR_EXTENT_CACHES];
4090 	int ext_tree[NR_EXTENT_CACHES];
4091 	int zombie_tree[NR_EXTENT_CACHES];
4092 	int ext_node[NR_EXTENT_CACHES];
4093 	/* to count memory footprint */
4094 	unsigned long long ext_mem[NR_EXTENT_CACHES];
4095 	/* for read extent cache */
4096 	unsigned long long hit_largest;
4097 	/* for block age extent cache */
4098 	unsigned long long allocated_data_blocks;
4099 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
4100 	int ndirty_data, ndirty_qdata;
4101 	unsigned int ndirty_dirs, ndirty_files, ndirty_all;
4102 	unsigned int nquota_files, ndonate_files;
4103 	int nats, dirty_nats, sits, dirty_sits;
4104 	int free_nids, avail_nids, alloc_nids;
4105 	int total_count, utilization;
4106 	int nr_wb_cp_data, nr_wb_data;
4107 	int nr_rd_data, nr_rd_node, nr_rd_meta;
4108 	int nr_dio_read, nr_dio_write;
4109 	unsigned int io_skip_bggc, other_skip_bggc;
4110 	int nr_flushing, nr_flushed, flush_list_empty;
4111 	int nr_discarding, nr_discarded;
4112 	int nr_discard_cmd;
4113 	unsigned int undiscard_blks;
4114 	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
4115 	unsigned int cur_ckpt_time, peak_ckpt_time;
4116 	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
4117 	int compr_inode, swapfile_inode;
4118 	unsigned long long compr_blocks;
4119 	int aw_cnt, max_aw_cnt;
4120 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
4121 	unsigned int bimodal, avg_vblocks;
4122 	int util_free, util_valid, util_invalid;
4123 	int rsvd_segs, overp_segs;
4124 	int dirty_count, node_pages, meta_pages, compress_pages;
4125 	int compress_page_hit;
4126 	int prefree_count, free_segs, free_secs;
4127 	int cp_call_count[MAX_CALL_TYPE], cp_count;
4128 	int gc_call_count[MAX_CALL_TYPE];
4129 	int gc_segs[2][2];
4130 	int gc_secs[2][2];
4131 	int tot_blks, data_blks, node_blks;
4132 	int bg_data_blks, bg_node_blks;
4133 	int curseg[NR_CURSEG_TYPE];
4134 	int cursec[NR_CURSEG_TYPE];
4135 	int curzone[NR_CURSEG_TYPE];
4136 	unsigned int dirty_seg[NR_CURSEG_TYPE];
4137 	unsigned int full_seg[NR_CURSEG_TYPE];
4138 	unsigned int valid_blks[NR_CURSEG_TYPE];
4139 
4140 	unsigned int meta_count[META_MAX];
4141 	unsigned int segment_count[2];
4142 	unsigned int block_count[2];
4143 	unsigned int inplace_count;
4144 	unsigned long long base_mem, cache_mem, page_mem;
4145 	struct f2fs_dev_stats *dev_stats;
4146 };
4147 
4148 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
4149 {
4150 	return (struct f2fs_stat_info *)sbi->stat_info;
4151 }
4152 
4153 #define stat_inc_cp_call_count(sbi, foreground)				\
4154 		atomic_inc(&sbi->cp_call_count[(foreground)])
4155 #define stat_inc_cp_count(sbi)		(F2FS_STAT(sbi)->cp_count++)
4156 #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
4157 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
4158 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
4159 #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
4160 #define stat_inc_total_hit(sbi, type)		(atomic64_inc(&(sbi)->total_hit_ext[type]))
4161 #define stat_inc_rbtree_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_rbtree[type]))
4162 #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
4163 #define stat_inc_cached_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_cached[type]))
4164 #define stat_inc_inline_xattr(inode)					\
4165 	do {								\
4166 		if (f2fs_has_inline_xattr(inode))			\
4167 			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
4168 	} while (0)
4169 #define stat_dec_inline_xattr(inode)					\
4170 	do {								\
4171 		if (f2fs_has_inline_xattr(inode))			\
4172 			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
4173 	} while (0)
4174 #define stat_inc_inline_inode(inode)					\
4175 	do {								\
4176 		if (f2fs_has_inline_data(inode))			\
4177 			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
4178 	} while (0)
4179 #define stat_dec_inline_inode(inode)					\
4180 	do {								\
4181 		if (f2fs_has_inline_data(inode))			\
4182 			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
4183 	} while (0)
4184 #define stat_inc_inline_dir(inode)					\
4185 	do {								\
4186 		if (f2fs_has_inline_dentry(inode))			\
4187 			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
4188 	} while (0)
4189 #define stat_dec_inline_dir(inode)					\
4190 	do {								\
4191 		if (f2fs_has_inline_dentry(inode))			\
4192 			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
4193 	} while (0)
4194 #define stat_inc_compr_inode(inode)					\
4195 	do {								\
4196 		if (f2fs_compressed_file(inode))			\
4197 			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
4198 	} while (0)
4199 #define stat_dec_compr_inode(inode)					\
4200 	do {								\
4201 		if (f2fs_compressed_file(inode))			\
4202 			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
4203 	} while (0)
4204 #define stat_add_compr_blocks(inode, blocks)				\
4205 		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
4206 #define stat_sub_compr_blocks(inode, blocks)				\
4207 		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
4208 #define stat_inc_swapfile_inode(inode)					\
4209 		(atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
4210 #define stat_dec_swapfile_inode(inode)					\
4211 		(atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
4212 #define stat_inc_atomic_inode(inode)					\
4213 			(atomic_inc(&F2FS_I_SB(inode)->atomic_files))
4214 #define stat_dec_atomic_inode(inode)					\
4215 			(atomic_dec(&F2FS_I_SB(inode)->atomic_files))
4216 #define stat_inc_meta_count(sbi, blkaddr)				\
4217 	do {								\
4218 		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
4219 			atomic_inc(&(sbi)->meta_count[META_CP]);	\
4220 		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
4221 			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
4222 		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
4223 			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
4224 		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
4225 			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
4226 	} while (0)
4227 #define stat_inc_seg_type(sbi, curseg)					\
4228 		((sbi)->segment_count[(curseg)->alloc_type]++)
4229 #define stat_inc_block_count(sbi, curseg)				\
4230 		((sbi)->block_count[(curseg)->alloc_type]++)
4231 #define stat_inc_inplace_blocks(sbi)					\
4232 		(atomic_inc(&(sbi)->inplace_count))
4233 #define stat_update_max_atomic_write(inode)				\
4234 	do {								\
4235 		int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files);	\
4236 		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
4237 		if (cur > max)						\
4238 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
4239 	} while (0)
4240 #define stat_inc_gc_call_count(sbi, foreground)				\
4241 		(F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
4242 #define stat_inc_gc_sec_count(sbi, type, gc_type)			\
4243 		(F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
4244 #define stat_inc_gc_seg_count(sbi, type, gc_type)			\
4245 		(F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)
4246 
4247 #define stat_inc_tot_blk_count(si, blks)				\
4248 	((si)->tot_blks += (blks))
4249 
4250 #define stat_inc_data_blk_count(sbi, blks, gc_type)			\
4251 	do {								\
4252 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
4253 		stat_inc_tot_blk_count(si, blks);			\
4254 		si->data_blks += (blks);				\
4255 		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
4256 	} while (0)
4257 
4258 #define stat_inc_node_blk_count(sbi, blks, gc_type)			\
4259 	do {								\
4260 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
4261 		stat_inc_tot_blk_count(si, blks);			\
4262 		si->node_blks += (blks);				\
4263 		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
4264 	} while (0)
4265 
4266 int f2fs_build_stats(struct f2fs_sb_info *sbi);
4267 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
4268 void __init f2fs_create_root_stats(void);
4269 void f2fs_destroy_root_stats(void);
4270 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
4271 #else
4272 #define stat_inc_cp_call_count(sbi, foreground)		do { } while (0)
4273 #define stat_inc_cp_count(sbi)				do { } while (0)
4274 #define stat_io_skip_bggc_count(sbi)			do { } while (0)
4275 #define stat_other_skip_bggc_count(sbi)			do { } while (0)
4276 #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
4277 #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
4278 #define stat_inc_total_hit(sbi, type)			do { } while (0)
4279 #define stat_inc_rbtree_node_hit(sbi, type)		do { } while (0)
4280 #define stat_inc_largest_node_hit(sbi)			do { } while (0)
4281 #define stat_inc_cached_node_hit(sbi, type)		do { } while (0)
4282 #define stat_inc_inline_xattr(inode)			do { } while (0)
4283 #define stat_dec_inline_xattr(inode)			do { } while (0)
4284 #define stat_inc_inline_inode(inode)			do { } while (0)
4285 #define stat_dec_inline_inode(inode)			do { } while (0)
4286 #define stat_inc_inline_dir(inode)			do { } while (0)
4287 #define stat_dec_inline_dir(inode)			do { } while (0)
4288 #define stat_inc_compr_inode(inode)			do { } while (0)
4289 #define stat_dec_compr_inode(inode)			do { } while (0)
4290 #define stat_add_compr_blocks(inode, blocks)		do { } while (0)
4291 #define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
4292 #define stat_inc_swapfile_inode(inode)			do { } while (0)
4293 #define stat_dec_swapfile_inode(inode)			do { } while (0)
4294 #define stat_inc_atomic_inode(inode)			do { } while (0)
4295 #define stat_dec_atomic_inode(inode)			do { } while (0)
4296 #define stat_update_max_atomic_write(inode)		do { } while (0)
4297 #define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
4298 #define stat_inc_seg_type(sbi, curseg)			do { } while (0)
4299 #define stat_inc_block_count(sbi, curseg)		do { } while (0)
4300 #define stat_inc_inplace_blocks(sbi)			do { } while (0)
4301 #define stat_inc_gc_call_count(sbi, foreground)		do { } while (0)
4302 #define stat_inc_gc_sec_count(sbi, type, gc_type)	do { } while (0)
4303 #define stat_inc_gc_seg_count(sbi, type, gc_type)	do { } while (0)
4304 #define stat_inc_tot_blk_count(si, blks)		do { } while (0)
4305 #define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
4306 #define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)
4307 
4308 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
4309 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
4310 static inline void __init f2fs_create_root_stats(void) { }
4311 static inline void f2fs_destroy_root_stats(void) { }
4312 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
4313 #endif
4314 
4315 extern const struct file_operations f2fs_dir_operations;
4316 extern const struct file_operations f2fs_file_operations;
4317 extern const struct inode_operations f2fs_file_inode_operations;
4318 extern const struct address_space_operations f2fs_dblock_aops;
4319 extern const struct address_space_operations f2fs_node_aops;
4320 extern const struct address_space_operations f2fs_meta_aops;
4321 extern const struct inode_operations f2fs_dir_inode_operations;
4322 extern const struct inode_operations f2fs_symlink_inode_operations;
4323 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
4324 extern const struct inode_operations f2fs_special_inode_operations;
4325 extern struct kmem_cache *f2fs_inode_entry_slab;
4326 
4327 /*
4328  * inline.c
4329  */
4330 bool f2fs_may_inline_data(struct inode *inode);
4331 bool f2fs_sanity_check_inline_data(struct inode *inode, struct folio *ifolio);
4332 bool f2fs_may_inline_dentry(struct inode *inode);
4333 void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio);
4334 void f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio,
4335 		u64 from);
4336 int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
4337 int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio);
4338 int f2fs_convert_inline_inode(struct inode *inode);
4339 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
4340 int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
4341 int f2fs_recover_inline_data(struct inode *inode, struct folio *nfolio);
4342 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
4343 		const struct f2fs_filename *fname, struct folio **res_folio,
4344 		bool use_hash);
4345 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
4346 			struct folio *ifolio);
4347 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
4348 			struct inode *inode, nid_t ino, umode_t mode);
4349 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
4350 		struct folio *folio, struct inode *dir, struct inode *inode);
4351 bool f2fs_empty_inline_dir(struct inode *dir);
4352 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
4353 			struct fscrypt_str *fstr);
4354 int f2fs_inline_data_fiemap(struct inode *inode,
4355 			struct fiemap_extent_info *fieinfo,
4356 			__u64 start, __u64 len);
4357 
4358 /*
4359  * shrinker.c
4360  */
4361 unsigned long f2fs_shrink_count(struct shrinker *shrink,
4362 			struct shrink_control *sc);
4363 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
4364 			struct shrink_control *sc);
4365 unsigned int f2fs_donate_files(void);
4366 void f2fs_reclaim_caches(unsigned int reclaim_caches_kb);
4367 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
4368 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
4369 
4370 /*
4371  * extent_cache.c
4372  */
4373 bool sanity_check_extent_cache(struct inode *inode, struct folio *ifolio);
4374 void f2fs_init_extent_tree(struct inode *inode);
4375 void f2fs_drop_extent_tree(struct inode *inode);
4376 void f2fs_destroy_extent_node(struct inode *inode);
4377 void f2fs_destroy_extent_tree(struct inode *inode);
4378 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
4379 int __init f2fs_create_extent_cache(void);
4380 void f2fs_destroy_extent_cache(void);
4381 
4382 /* read extent cache ops */
4383 void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio);
4384 bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
4385 			struct extent_info *ei);
4386 bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
4387 			block_t *blkaddr);
4388 void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
4389 void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
4390 			pgoff_t fofs, block_t blkaddr, unsigned int len);
4391 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
4392 			int nr_shrink);
4393 
4394 /* block age extent cache ops */
4395 void f2fs_init_age_extent_tree(struct inode *inode);
4396 bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
4397 			struct extent_info *ei);
4398 void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
4399 void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
4400 			pgoff_t fofs, unsigned int len);
4401 unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
4402 			int nr_shrink);
4403 
4404 /*
4405  * sysfs.c
4406  */
4407 #define MIN_RA_MUL	2
4408 #define MAX_RA_MUL	256
4409 
4410 int __init f2fs_init_sysfs(void);
4411 void f2fs_exit_sysfs(void);
4412 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
4413 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
4414 
4415 /* verity.c */
4416 extern const struct fsverity_operations f2fs_verityops;
4417 
4418 /*
4419  * crypto support
4420  */
4421 static inline bool f2fs_encrypted_file(struct inode *inode)
4422 {
4423 	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
4424 }
4425 
4426 static inline void f2fs_set_encrypted_inode(struct inode *inode)
4427 {
4428 #ifdef CONFIG_FS_ENCRYPTION
4429 	file_set_encrypt(inode);
4430 	f2fs_set_inode_flags(inode);
4431 #endif
4432 }
4433 
4434 /*
4435  * Returns true if the reads of the inode's data need to undergo some
4436  * postprocessing step, like decryption or authenticity verification.
4437  */
4438 static inline bool f2fs_post_read_required(struct inode *inode)
4439 {
4440 	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
4441 		f2fs_compressed_file(inode);
4442 }
4443 
4444 static inline bool f2fs_used_in_atomic_write(struct inode *inode)
4445 {
4446 	return f2fs_is_atomic_file(inode) || f2fs_is_cow_file(inode);
4447 }
4448 
4449 static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
4450 {
4451 	return f2fs_post_read_required(inode) || f2fs_used_in_atomic_write(inode);
4452 }
4453 
4454 /*
4455  * compress.c
4456  */
4457 #ifdef CONFIG_F2FS_FS_COMPRESSION
4458 enum cluster_check_type {
4459 	CLUSTER_IS_COMPR,   /* check only whether the cluster is compressed */
4460 	CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
4461 	CLUSTER_RAW_BLKS    /* return # of raw blocks in a cluster */
4462 };
4463 bool f2fs_is_compressed_page(struct folio *folio);
4464 struct folio *f2fs_compress_control_folio(struct folio *folio);
4465 int f2fs_prepare_compress_overwrite(struct inode *inode,
4466 			struct page **pagep, pgoff_t index, void **fsdata);
4467 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
4468 					pgoff_t index, unsigned copied);
4469 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
4470 void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio);
4471 bool f2fs_is_compress_backend_ready(struct inode *inode);
4472 bool f2fs_is_compress_level_valid(int alg, int lvl);
4473 int __init f2fs_init_compress_mempool(void);
4474 void f2fs_destroy_compress_mempool(void);
4475 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
4476 void f2fs_end_read_compressed_page(struct folio *folio, bool failed,
4477 				block_t blkaddr, bool in_task);
4478 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
4479 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
4480 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
4481 				int index, int nr_pages, bool uptodate);
4482 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
4483 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio);
4484 int f2fs_write_multi_pages(struct compress_ctx *cc,
4485 						int *submitted,
4486 						struct writeback_control *wbc,
4487 						enum iostat_type io_type);
4488 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
4489 bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
4490 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
4491 				pgoff_t fofs, block_t blkaddr,
4492 				unsigned int llen, unsigned int c_len);
4493 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
4494 				unsigned nr_pages, sector_t *last_block_in_bio,
4495 				struct readahead_control *rac, bool for_write);
4496 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
4497 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
4498 				bool in_task);
4499 void f2fs_put_folio_dic(struct folio *folio, bool in_task);
4500 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
4501 						unsigned int ofs_in_node);
4502 int f2fs_init_compress_ctx(struct compress_ctx *cc);
4503 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
4504 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
4505 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
4506 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
4507 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
4508 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
4509 int __init f2fs_init_compress_cache(void);
4510 void f2fs_destroy_compress_cache(void);
4511 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
4512 void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
4513 					block_t blkaddr, unsigned int len);
4514 bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
4515 								block_t blkaddr);
4516 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
4517 #define inc_compr_inode_stat(inode)					\
4518 	do {								\
4519 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4520 		sbi->compr_new_inode++;					\
4521 	} while (0)
4522 #define add_compr_block_stat(inode, blocks)				\
4523 	do {								\
4524 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4525 		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
4526 		sbi->compr_written_block += blocks;			\
4527 		sbi->compr_saved_block += diff;				\
4528 	} while (0)
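
/*
 * Editor's sketch (hypothetical helper, not part of the original header):
 * the saving accounted by add_compr_block_stat() above.  E.g. with
 * i_cluster_size == 16 and blocks == 4, the cluster saved twelve blocks,
 * so compr_written_block grows by 4 and compr_saved_block by 12.
 */
static inline int example_compr_saved_blocks(struct inode *inode, int blocks)
{
	return F2FS_I(inode)->i_cluster_size - blocks;
}
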
4529 #else
4530 static inline bool f2fs_is_compressed_page(struct folio *folio) { return false; }
4531 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
4532 {
4533 	if (!f2fs_compressed_file(inode))
4534 		return true;
4535 	/* compression is not supported */
4536 	return false;
4537 }
4538 static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
4539 static inline struct folio *f2fs_compress_control_folio(struct folio *folio)
4540 {
4541 	WARN_ON_ONCE(1);
4542 	return ERR_PTR(-EINVAL);
4543 }
4544 static inline int __init f2fs_init_compress_mempool(void) { return 0; }
4545 static inline void f2fs_destroy_compress_mempool(void) { }
4546 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
4547 				bool in_task) { }
4548 static inline void f2fs_end_read_compressed_page(struct folio *folio,
4549 				bool failed, block_t blkaddr, bool in_task)
4550 {
4551 	WARN_ON_ONCE(1);
4552 }
4553 static inline void f2fs_put_folio_dic(struct folio *folio, bool in_task)
4554 {
4555 	WARN_ON_ONCE(1);
4556 }
4557 static inline unsigned int f2fs_cluster_blocks_are_contiguous(
4558 			struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
4559 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
4560 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
4561 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
4562 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
4563 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
4564 static inline int __init f2fs_init_compress_cache(void) { return 0; }
4565 static inline void f2fs_destroy_compress_cache(void) { }
4566 static inline void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
4567 				block_t blkaddr, unsigned int len) { }
4568 static inline bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi,
4569 		struct folio *folio, block_t blkaddr) { return false; }
4570 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
4571 							nid_t ino) { }
4572 #define inc_compr_inode_stat(inode)		do { } while (0)
4573 static inline int f2fs_is_compressed_cluster(
4574 				struct inode *inode,
4575 				pgoff_t index) { return 0; }
4576 static inline bool f2fs_is_sparse_cluster(
4577 				struct inode *inode,
4578 				pgoff_t index) { return true; }
4579 static inline void f2fs_update_read_extent_tree_range_compressed(
4580 				struct inode *inode,
4581 				pgoff_t fofs, block_t blkaddr,
4582 				unsigned int llen, unsigned int c_len) { }
4583 #endif
4584 
4585 static inline int set_compress_context(struct inode *inode)
4586 {
4587 #ifdef CONFIG_F2FS_FS_COMPRESSION
4588 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4589 	struct f2fs_inode_info *fi = F2FS_I(inode);
4590 
4591 	fi->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm;
4592 	fi->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size;
4593 	fi->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ?
4594 					BIT(COMPRESS_CHKSUM) : 0;
4595 	fi->i_cluster_size = BIT(fi->i_log_cluster_size);
4596 	if ((fi->i_compress_algorithm == COMPRESS_LZ4 ||
4597 		fi->i_compress_algorithm == COMPRESS_ZSTD) &&
4598 			F2FS_OPTION(sbi).compress_level)
4599 		fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
4600 	fi->i_flags |= F2FS_COMPR_FL;
4601 	set_inode_flag(inode, FI_COMPRESSED_FILE);
4602 	stat_inc_compr_inode(inode);
4603 	inc_compr_inode_stat(inode);
4604 	f2fs_mark_inode_dirty_sync(inode, true);
4605 	return 0;
4606 #else
4607 	return -EOPNOTSUPP;
4608 #endif
4609 }
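
/*
 * Editor's note (worked example, not part of the original header): with
 * compress_log_size == 2, set_compress_context() yields i_cluster_size
 * == BIT(2) == 4 blocks, i.e. a 16KiB cluster on 4KiB blocks.  The
 * compress_level option is only carried over for LZ4 and ZSTD, the two
 * backends with tunable levels.
 */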
4610 
4611 static inline bool f2fs_disable_compressed_file(struct inode *inode)
4612 {
4613 	struct f2fs_inode_info *fi = F2FS_I(inode);
4614 
4615 	f2fs_down_write(&fi->i_sem);
4616 
4617 	if (!f2fs_compressed_file(inode)) {
4618 		f2fs_up_write(&fi->i_sem);
4619 		return true;
4620 	}
4621 	if (f2fs_is_mmap_file(inode) ||
4622 		(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
4623 		f2fs_up_write(&fi->i_sem);
4624 		return false;
4625 	}
4626 
4627 	fi->i_flags &= ~F2FS_COMPR_FL;
4628 	stat_dec_compr_inode(inode);
4629 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
4630 	f2fs_mark_inode_dirty_sync(inode, true);
4631 
4632 	f2fs_up_write(&fi->i_sem);
4633 	return true;
4634 }
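
/*
 * Editor's note (not part of the original header): callers that must
 * fall back to plain I/O, e.g. when pinning a file, use the false
 * return above to bail out; the compression flag can only be dropped
 * while the file is neither mmapped nor already backed by data blocks.
 */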
4635 
4636 #define F2FS_FEATURE_FUNCS(name, flagname) \
4637 static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
4638 { \
4639 	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
4640 }
4641 
4642 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
4643 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
4644 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
4645 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
4646 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
4647 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
4648 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
4649 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
4650 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
4651 F2FS_FEATURE_FUNCS(verity, VERITY);
4652 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
4653 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
4654 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
4655 F2FS_FEATURE_FUNCS(readonly, RO);
4656 F2FS_FEATURE_FUNCS(device_alias, DEVICE_ALIAS);
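
/*
 * Editor's note (expansion example, not part of the original header):
 * each F2FS_FEATURE_FUNCS() line above stamps out one predicate, e.g.
 * F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) expands to
 *
 *	static inline bool f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */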
4657 
4658 #ifdef CONFIG_BLK_DEV_ZONED
4659 static inline bool f2fs_zone_is_seq(struct f2fs_sb_info *sbi, int devi,
4660 							unsigned int zone)
4661 {
4662 	return test_bit(zone, FDEV(devi).blkz_seq);
4663 }
4664 
4665 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
4666 								block_t blkaddr)
4667 {
4668 	return f2fs_zone_is_seq(sbi, devi, blkaddr / sbi->blocks_per_blkz);
4669 }
4670 #endif
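
/*
 * Editor's note (worked example, not part of the original header):
 * with 256MiB zones and 4KiB blocks, blocks_per_blkz == 65536, so
 * f2fs_blkz_is_seq() maps blkaddr 196608 to zone 196608 / 65536 == 3
 * and tests bit 3 of that device's blkz_seq bitmap.
 */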
4671 
4672 static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi,
4673 				  struct block_device *bdev)
4674 {
4675 	int i;
4676 
4677 	if (!f2fs_is_multi_device(sbi))
4678 		return 0;
4679 
4680 	for (i = 0; i < sbi->s_ndevs; i++)
4681 		if (FDEV(i).bdev == bdev)
4682 			return i;
4683 
4684 	WARN_ON(1);
4685 	return -1;
4686 }
4687 
4688 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
4689 {
4690 	return f2fs_sb_has_blkzoned(sbi);
4691 }
4692 
4693 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
4694 {
4695 	return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
4696 }
4697 
4698 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
4699 {
4700 	int i;
4701 
4702 	if (!f2fs_is_multi_device(sbi))
4703 		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
4704 
4705 	for (i = 0; i < sbi->s_ndevs; i++)
4706 		if (f2fs_bdev_support_discard(FDEV(i).bdev))
4707 			return true;
4708 	return false;
4709 }
4710 
4711 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
4712 {
4713 	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
4714 					f2fs_hw_should_discard(sbi);
4715 }
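
/*
 * Editor's sketch (hypothetical helper, equivalent to the predicate
 * above, not part of the original header): zoned devices discard
 * unconditionally, since sequential zones must be reset before they
 * can be rewritten; conventional devices additionally need both
 * hardware support and the "discard" mount option.
 */
static inline bool example_will_issue_discard(struct f2fs_sb_info *sbi)
{
	if (f2fs_hw_should_discard(sbi))	/* zoned: always discard */
		return true;
	return test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi);
}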
4716 
4717 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
4718 {
4719 	int i;
4720 
4721 	if (!f2fs_is_multi_device(sbi))
4722 		return bdev_read_only(sbi->sb->s_bdev);
4723 
4724 	for (i = 0; i < sbi->s_ndevs; i++)
4725 		if (bdev_read_only(FDEV(i).bdev))
4726 			return true;
4727 	return false;
4728 }
4729 
4730 static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi)
4731 {
4732 	return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi);
4733 }
4734 
4735 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
4736 {
4737 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
4738 }
4739 
4740 static inline bool f2fs_is_sequential_zone_area(struct f2fs_sb_info *sbi,
4741 					  block_t blkaddr)
4742 {
4743 	if (f2fs_sb_has_blkzoned(sbi)) {
4744 #ifdef CONFIG_BLK_DEV_ZONED
4745 		int devi = f2fs_target_device_index(sbi, blkaddr);
4746 
4747 		if (!bdev_is_zoned(FDEV(devi).bdev))
4748 			return false;
4749 
4750 		if (f2fs_is_multi_device(sbi)) {
4751 			if (blkaddr < FDEV(devi).start_blk ||
4752 				blkaddr > FDEV(devi).end_blk) {
4753 				f2fs_err(sbi, "Invalid block %x", blkaddr);
4754 				return false;
4755 			}
4756 			blkaddr -= FDEV(devi).start_blk;
4757 		}
4758 
4759 		return f2fs_blkz_is_seq(sbi, devi, blkaddr);
4760 #else
4761 		return false;
4762 #endif
4763 	}
4764 	return false;
4765 }
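
/*
 * Editor's note (worked example, not part of the original header): on
 * a multi-device setup where device 1 spans blocks [0x100000, 0x1fffff],
 * a query for blkaddr 0x130000 is rebased to the device-relative block
 * 0x30000 before the per-zone sequential bit is consulted.
 */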
4766 
4767 static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
4768 {
4769 	return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
4770 }
4771 
4772 static inline bool f2fs_may_compress(struct inode *inode)
4773 {
4774 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
4775 		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
4776 		f2fs_is_mmap_file(inode))
4777 		return false;
4778 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
4779 }
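
/*
 * Editor's sketch (hypothetical caller, not part of the original
 * header): the gate a caller would pass before flipping a regular
 * file to compressed mode with set_compress_context().
 */
static inline int example_try_enable_compression(struct inode *inode)
{
	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;
	if (!f2fs_may_compress(inode))
		return -EINVAL;
	return set_compress_context(inode);
}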
4780 
4781 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
4782 						u64 blocks, bool add)
4783 {
4784 	struct f2fs_inode_info *fi = F2FS_I(inode);
4785 	int diff = fi->i_cluster_size - blocks;
4786 
4787 	/* don't update i_compr_blocks if saved blocks were released */
4788 	if (!add && !atomic_read(&fi->i_compr_blocks))
4789 		return;
4790 
4791 	if (add) {
4792 		atomic_add(diff, &fi->i_compr_blocks);
4793 		stat_add_compr_blocks(inode, diff);
4794 	} else {
4795 		atomic_sub(diff, &fi->i_compr_blocks);
4796 		stat_sub_compr_blocks(inode, diff);
4797 	}
4798 	f2fs_mark_inode_dirty_sync(inode, true);
4799 }
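
/*
 * Editor's note (worked example, not part of the original header):
 * with i_cluster_size == 16 and blocks == 4, diff == 12 blocks are
 * added to (or subtracted from) i_compr_blocks and the per-sb stats;
 * the early return keeps i_compr_blocks from going negative once the
 * saved blocks have already been released.
 */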
4800 
4801 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
4802 								int flag)
4803 {
4804 	if (!f2fs_is_multi_device(sbi))
4805 		return false;
4806 	if (flag != F2FS_GET_BLOCK_DIO)
4807 		return false;
4808 	return sbi->aligned_blksize;
4809 }
4810 
4811 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
4812 {
4813 	return fsverity_active(inode) &&
4814 	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
4815 }
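
/*
 * Editor's note (worked example, not part of the original header):
 * with i_size == 10000 and 4KiB pages, DIV_ROUND_UP() gives 3, so only
 * page indices 0-2 are verified; pages past EOF, where f2fs keeps the
 * Merkle tree, are deliberately excluded.
 */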
4816 
4817 #ifdef CONFIG_F2FS_FAULT_INJECTION
4818 extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
4819 					unsigned long type, enum fault_option fo);
4820 #else
4821 static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
4822 					unsigned long rate, unsigned long type,
4823 					enum fault_option fo)
4824 {
4825 	return 0;
4826 }
4827 #endif
4828 
4829 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
4830 {
4831 #ifdef CONFIG_QUOTA
4832 	if (f2fs_sb_has_quota_ino(sbi))
4833 		return true;
4834 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
4835 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
4836 		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
4837 		return true;
4838 #endif
4839 	return false;
4840 }
4841 
4842 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
4843 {
4844 	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
4845 }
4846 
4847 static inline void f2fs_io_schedule_timeout(long timeout)
4848 {
4849 	set_current_state(TASK_UNINTERRUPTIBLE);
4850 	io_schedule_timeout(timeout);
4851 }
4852 
4853 static inline void f2fs_io_schedule_timeout_killable(long timeout)
4854 {
4855 	while (timeout) {
4856 		if (fatal_signal_pending(current))
4857 			return;
4858 		set_current_state(TASK_UNINTERRUPTIBLE);
4859 		io_schedule_timeout(DEFAULT_IO_TIMEOUT);
4860 		if (timeout <= DEFAULT_IO_TIMEOUT)
4861 			return;
4862 		timeout -= DEFAULT_IO_TIMEOUT;
4863 	}
4864 }
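
/*
 * Editor's note (not part of the original header): the "killable"
 * variant still sleeps in TASK_UNINTERRUPTIBLE, but it slices the wait
 * into DEFAULT_IO_TIMEOUT chunks and re-checks fatal_signal_pending()
 * between chunks, so a SIGKILL is honoured within one slice instead of
 * only after the full timeout.
 */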
4865 
4866 static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
4867 				struct folio *folio, enum page_type type)
4868 {
4869 	pgoff_t ofs = folio->index;
4870 
4871 	if (unlikely(f2fs_cp_error(sbi)))
4872 		return;
4873 
4874 	if (ofs == sbi->page_eio_ofs[type]) {
4875 		if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
4876 			set_ckpt_flags(sbi, CP_ERROR_FLAG);
4877 	} else {
4878 		sbi->page_eio_ofs[type] = ofs;
4879 		sbi->page_eio_cnt[type] = 0;
4880 	}
4881 }
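
/*
 * Editor's note (not part of the original header): repeated I/O errors
 * on the same page offset bump page_eio_cnt until it reaches
 * MAX_RETRY_PAGE_EIO, at which point CP_ERROR_FLAG is set on the
 * checkpoint; an error at a different offset restarts the count.
 */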
4882 
4883 static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
4884 {
4885 	return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
4886 }
4887 
4888 static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
4889 					block_t blkaddr, unsigned int cnt)
4890 {
4891 	bool need_submit = false;
4892 	int i = 0;
4893 
4894 	do {
4895 		struct folio *folio;
4896 
4897 		folio = filemap_get_folio(META_MAPPING(sbi), blkaddr + i);
4898 		if (!IS_ERR(folio)) {
4899 			if (folio_test_writeback(folio))
4900 				need_submit = true;
4901 			f2fs_folio_put(folio, false);
4902 		}
4903 	} while (++i < cnt && !need_submit);
4904 
4905 	if (need_submit)
4906 		f2fs_submit_merged_write_cond(sbi, sbi->meta_inode,
4907 							NULL, 0, DATA);
4908 
4909 	truncate_inode_pages_range(META_MAPPING(sbi),
4910 			F2FS_BLK_TO_BYTES((loff_t)blkaddr),
4911 			F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1)));
4912 }
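
/*
 * Editor's note (worked example, not part of the original header): for
 * blkaddr == 100 and cnt == 2 with 4KiB blocks, the truncated byte
 * range is [409600, 417791], covering both blocks inclusively; pending
 * merged writes are flushed first if any page in the range is still
 * under writeback.
 */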
4913 
4914 static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
4915 						block_t blkaddr, unsigned int len)
4916 {
4917 	f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
4918 	f2fs_invalidate_compress_pages_range(sbi, blkaddr, len);
4919 }
4920 
4921 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
4922 #define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
4923 
4924 #endif /* _LINUX_F2FS_H */
4925