/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <linux/rw_hint.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* it's obsolete due to bio_alloc() will never fail */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_BLKADDR_VALIDITY,
	FAULT_BLKADDR_CONSISTENCE,
	FAULT_NO_SEGMENT,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE	(GENMASK(FAULT_MAX - 1, 0))

struct f2fs_fault_info {
	atomic_t inject_ops;
	int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & BIT(type))

/* maximum retry count for injected failure */
#define DEFAULT_FAILURE_RETRY_COUNT	8
#else
#define DEFAULT_FAILURE_RETRY_COUNT	1
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000001
#define F2FS_MOUNT_DISCARD		0x00000002
#define F2FS_MOUNT_NOHEAP		0x00000004
#define F2FS_MOUNT_XATTR_USER		0x00000008
#define F2FS_MOUNT_POSIX_ACL		0x00000010
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000020
#define F2FS_MOUNT_INLINE_XATTR		0x00000040
#define F2FS_MOUNT_INLINE_DATA		0x00000080
#define F2FS_MOUNT_INLINE_DENTRY	0x00000100
#define F2FS_MOUNT_FLUSH_MERGE		0x00000200
#define F2FS_MOUNT_NOBARRIER		0x00000400
#define F2FS_MOUNT_FASTBOOT		0x00000800
#define F2FS_MOUNT_READ_EXTENT_CACHE	0x00001000
#define F2FS_MOUNT_DATA_FLUSH		0x00002000
#define F2FS_MOUNT_FAULT_INJECTION	0x00004000
#define F2FS_MOUNT_USRQUOTA		0x00008000
#define F2FS_MOUNT_GRPQUOTA		0x00010000
#define F2FS_MOUNT_PRJQUOTA		0x00020000
#define F2FS_MOUNT_QUOTA		0x00040000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00080000
#define F2FS_MOUNT_RESERVE_ROOT		0x00100000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x00200000
#define F2FS_MOUNT_NORECOVERY		0x00400000
#define F2FS_MOUNT_ATGC			0x00800000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x01000000
#define F2FS_MOUNT_GC_MERGE		0x02000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x04000000
#define F2FS_MOUNT_AGE_EXTENT_CACHE	0x08000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
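
/*
 * Illustrative usage (not part of the original header): mount options are a
 * plain bitmask in sbi->mount_opt.opt, so the helpers above boil down to
 * single bit operations, e.g.
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, DISCARD))
 *		... enable the discard thread ...
 *	clear_opt(sbi, DISCARD);
 */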

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read lock
 * while sleeping on the write lock but the write lock is needed by
 * higher-priority clients.
 */

struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_queue_head_t read_waiters;
#endif
};

struct f2fs_mount_info {
	unsigned int opt;
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int memory_mode;		/* memory mode */
	int errors;			/* errors parameter */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy;	/* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT			0x00000001
#define F2FS_FEATURE_BLKZONED			0x00000002
#define F2FS_FEATURE_ATOMIC_WRITE		0x00000004
#define F2FS_FEATURE_EXTRA_ATTR			0x00000008
#define F2FS_FEATURE_PRJQUOTA			0x00000010
#define F2FS_FEATURE_INODE_CHKSUM		0x00000020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x00000040
#define F2FS_FEATURE_QUOTA_INO			0x00000080
#define F2FS_FEATURE_INODE_CRTIME		0x00000100
#define F2FS_FEATURE_LOST_FOUND			0x00000200
#define F2FS_FEATURE_VERITY			0x00000400
#define F2FS_FEATURE_SB_CHKSUM			0x00000800
#define F2FS_FEATURE_CASEFOLD			0x00001000
#define F2FS_FEATURE_COMPRESSION		0x00002000
#define F2FS_FEATURE_RO				0x00004000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
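
/*
 * Illustrative usage (not part of the original header): feature bits are a
 * __le32 mask in the raw superblock, so a feature check is just a masked
 * compare, e.g.
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		... the image was formatted with compression support ...
 */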

/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 secs */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	DATA_GENERIC_ENHANCE_UPDATE,	/*
					 * strong check on range and segment
					 * bitmap for update case
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* minimum discard granularity, unit: block count */
#define MIN_DISCARD_GRANULARITY		1
/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16
/* default maximum discard granularity of ordered discard, unit: block count */
#define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY	16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
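
/*
 * Worked examples (not in the original header) of how plist_idx() buckets a
 * discard request by block count: plist_idx(1) == 0, plist_idx(16) == 15,
 * plist_idx(511) == 510, and anything >= MAX_PLIST_NUM, e.g. plist_idx(4096),
 * maps to the last list, MAX_PLIST_NUM - 1 == 511.
 */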

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct discard_info di;		/* discard info */
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

enum {
	DPOLICY_IO_AWARE_DISABLE,	/* force to not be aware of IO */
	DPOLICY_IO_AWARE_ENABLE,	/* force to be aware of IO */
	DPOLICY_IO_AWARE_MAX,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	struct mutex cmd_lock;
	unsigned int nr_discards;	/* # of discards in the list */
	unsigned int max_discards;	/* max. discards to be issued */
	unsigned int max_discard_request;	/* max. discard request per round */
	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
	unsigned int discard_io_aware_gran;	/* minimum discard granularity not be aware of I/O */
	unsigned int discard_urgent_util;	/* utilization which issue discard proactively */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int max_ordered_discard;	/* maximum discard granularity issued by lba order */
	unsigned int discard_io_aware;		/* io_aware policy */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
	bool discard_wake;			/* to wake up discard thread */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
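
/*
 * Illustrative layout sketch (not in the original header): the macros above
 * carve the inline directory area into, in order, a dentry bitmap, a small
 * reserved gap, the array of f2fs_dir_entry slots, and the filename slots.
 * See make_dentry_ptr_inline() below, which wires d->bitmap, d->dentry and
 * d->filename to exactly those offsets.
 */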

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the original name is
	 * "." or "..", if the directory is both casefolded and encrypted and
	 * its encryption key is unavailable, or if the filesystem is doing an
	 * internal operation where usr_fname is also NULL.  In all these cases
	 * we fall back to treating the name as an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}

/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

/* maximum retry of EIO'ed page */
#define MAX_RETRY_PAGE_EIO			100

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS		1

#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define READ_EXTENT_CACHE_SHRINK_NUMBER	128

/* number of age extent info in extent cache we try to shrink */
#define AGE_EXTENT_CACHE_SHRINK_NUMBER	128
#define LAST_AGE_WEIGHT			30
#define SAME_AGE_REGION			1024

/*
 * Define data block with age less than 1GB as hot data
 * define data block with age less than 10GB but more than 1GB as warm data
 */
#define DEF_HOT_DATA_AGE_THRESHOLD	262144
#define DEF_WARM_DATA_AGE_THRESHOLD	2621440

/* extent cache type */
enum extent_type {
	EX_READ,
	EX_BLOCK_AGE,
	NR_EXTENT_CACHES,
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	union {
		/* read extent_cache */
		struct {
			/* start block address of the extent */
			block_t blk;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			/* physical extent length of compressed blocks */
			unsigned int c_len;
#endif
		};
		/* block age extent_cache */
		struct {
			/* block age of the extent */
			unsigned long long age;
			/* last total blocks allocated */
			unsigned long long last_blocks;
		};
	};
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	enum extent_type type;		/* keep the extent tree type */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
	struct extent_info largest;	/* largest cached extent for EX_READ */
};

struct extent_tree_info {
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */
};
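
/*
 * Worked numbers (not in the original header) for the block-age thresholds
 * above, assuming 4KB blocks: DEF_HOT_DATA_AGE_THRESHOLD (262144 blocks) is
 * 1GiB and DEF_WARM_DATA_AGE_THRESHOLD (2621440 blocks) is 10GiB, matching
 * the hot/warm split described in their comment.
 */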

/*
 * State of block returned by f2fs_map_blocks.
 */
#define F2FS_MAP_NEW		(1U << 0)
#define F2FS_MAP_MAPPED		(1U << 1)
#define F2FS_MAP_DELALLOC	(1U << 2)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_DELALLOC)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT.  We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)

#define DEF_DIR_LEVEL		0
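
/*
 * Illustrative usage (not part of the original header): the FADVISE_* bits
 * live in the inode's i_advise hint byte and are flipped through the
 * wrappers above, e.g.
 *
 *	file_set_cold(inode);
 *	if (file_is_cold(inode))
 *		... steer this file's data to a cold log ...
 */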

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_SKIP_WRITES,		/* should skip data page writeback */
	FI_OPU_WRITE,		/* used for opu per file */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_COW_FILE,		/* indicate COW file */
	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
	FI_ATOMIC_REPLACE,	/* indicate atomic replace */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	union {
		unsigned int i_current_depth;	/* only for directory depth */
		unsigned short i_gc_failures;	/* for gc failure statistic */
	};
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	struct task_struct *wb_task;	/* indicate inode is in context of writeback */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot __rcu *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct task_struct *atomic_write_task;	/* store atomic write task */
	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
					/* cached extent_tree entry */
	struct inode *cow_inode;	/* copy-on-write inode for atomic write */

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_xattr_sem;	/* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[3];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned char i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */

	unsigned int atomic_write_cnt;
	loff_t original_i_size;		/* original i_size before atomic write */
};

static inline void get_read_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_read_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE];	/* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
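
/*
 * Illustrative usage (not part of the original header): a typical lookup
 * elsewhere in f2fs initializes the cursor with set_new_dnode() and then
 * resolves it, roughly
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		... use dn.data_blkaddr / dn.ofs_in_node ...
 *		f2fs_put_dnode(&dn);
 *	}
 */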

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p, f)			\
	(f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
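
/*
 * Illustrative usage (not part of the original header): writeback accounting
 * picks its bucket through WB_DATA_TYPE(), so checkpoint-guaranteed pages and
 * ordinary data pages are counted separately, e.g.
 *
 *	inc_page_count(sbi, WB_DATA_TYPE(page, false));
 *	... submit the write ...
 *	dec_page_count(sbi, WB_DATA_TYPE(page, false));
 */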
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			with waiting the bio's completion
 * ...			Only can be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
#define PAGE_TYPE_ON_MAIN(type)	((type) == DATA || (type) == NODE)
enum page_type {
	DATA = 0,
	NODE = 1,	/* should not change this */
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	IPU,		/* the below types are used by tracepoints only. */
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	APP_BUFFERED_CDATA_IO,		/* app buffered write IOs on compressed file */
	APP_MAPPED_CDATA_IO,		/* app mapped write IOs on compressed file */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_CDATA_IO,			/* data IOs from kworker/fsync/reclaimer on compressed file */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	APP_BUFFERED_CDATA_READ_IO,	/* app buffered read IOs on compressed file */
	APP_MAPPED_CDATA_READ_IO,	/* app mapped read IOs on compressed file */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD_IO,			/* discard */
	FS_FLUSH_IO,			/* flush */
	FS_ZONE_RESET_IO,		/* zone reset */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	enum req_op op;		/* contains REQ_OP_ */
	blk_opf_t op_flags;	/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	unsigned int compr_blocks;	/* # of compressed block addresses */
	unsigned int need_lock:8;	/* indicate we need to lock cp_rwsem */
	unsigned int version:8;		/* version of the node */
	unsigned int submitted:1;	/* indicate IO submission */
	unsigned int in_list:1;		/* indicate fio is in io_list */
	unsigned int is_por:1;		/* indicate IO is from recovery or not */
	unsigned int encrypted:1;	/* indicate file is encrypted */
	unsigned int post_read:1;	/* require post read */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
#ifdef CONFIG_BLK_DEV_ZONED
	struct completion zone_wait;	/* condition value for the previous open zone to close */
	struct bio *zone_pending_bio;	/* pending bio for the previous zone */
	void *bi_private;		/* previous bi_private for pending bio */
#endif
	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct file *bdev_file;
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

struct f2fs_gc_control {
	unsigned int victim_segno;	/* target victim segment number */
	int init_gc_type;		/* FG_GC or BG_GC */
	bool no_bg_gc;			/* check the space and stop bg_gc */
	bool should_migrate_blocks;	/* should migrate blocks */
	bool err_gc_skipped;		/* return EAGAIN if GC skipped */
	unsigned int nr_free_secs;	/* # of free sections to do GC */
};

/*
 * For s_flag in struct f2fs_sb_info
 * Modification on enum should be synchronized with s_flag array
 */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
	SBI_IS_FREEZING,			/* freezefs is in process */
	SBI_IS_WRITABLE,			/* remove ro mountoption transiently */
	MAX_SBI_FLAG,
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

/* Note that you need to keep synchronization with this gc_mode_names array */
enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	GC_URGENT_MID,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,	/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,	/* block fragmentation mode */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

enum {
	MEMORY_MODE_NORMAL,	/* memory mode for normal devices */
	MEMORY_MODE_LOW,	/* memory mode for low memory devices */
};

enum errors_option {
	MOUNT_ERRORS_READONLY,	/* remount fs ro on errors */
	MOUNT_ERRORS_CONTINUE,	/* continue on errors */
	MOUNT_ERRORS_PANIC,	/* panic on errors */
};

enum {
	BACKGROUND,
	FOREGROUND,
	MAX_CALL_TYPE,
	TOTAL_CALL = FOREGROUND,
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 2	PAGE_PRIVATE_INLINE_INODE
 * bit 3	PAGE_PRIVATE_REF_RESOURCE
 * bit 4-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define	COMPRESS_WATERMARK			20
#define	COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define F2FS_ZSTD_DEFAULT_CLEVEL	1

#define	COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belong to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	unsigned int valid_nr_cpages;	/* valid page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belong to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belong to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages.  It is decremented by 1 each time a page
	 * has been read (or failed to be read).  When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0.  In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion.  This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio.  These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
	struct work_struct free_work;	/* work for late free this structure itself */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int max_open_zones;		/* max open zone resources of the zoned device */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct f2fs_rwsem io_order_lock;
	pgoff_t page_eio_ofs[NR_PAGE_TYPE];	/* EIO page offset */
	int page_eio_cnt[NR_PAGE_TYPE];		/* EIO count */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
	struct f2fs_rwsem node_write;		/* locking node writes */
	struct f2fs_rwsem node_change;		/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
	atomic64_t allocated_data_blocks;	/* for block age extent_cache */

	/* The threshold used for hot and warm data separation */
	unsigned int hot_data_age_threshold;
	unsigned int warm_data_age_threshold;
	unsigned int last_age_weight;

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
log2 sectors per block */ 1613 unsigned int log_blocksize; /* log2 block size */ 1614 unsigned int blocksize; /* block size */ 1615 unsigned int root_ino_num; /* root inode number */ 1616 unsigned int node_ino_num; /* node inode number */ 1617 unsigned int meta_ino_num; /* meta inode number */ 1618 unsigned int log_blocks_per_seg; /* log2 blocks per segment */ 1619 unsigned int blocks_per_seg; /* blocks per segment */ 1620 unsigned int unusable_blocks_per_sec; /* unusable blocks per section */ 1621 unsigned int segs_per_sec; /* segments per section */ 1622 unsigned int secs_per_zone; /* sections per zone */ 1623 unsigned int total_sections; /* total section count */ 1624 unsigned int total_node_count; /* total node block count */ 1625 unsigned int total_valid_node_count; /* valid node block count */ 1626 int dir_level; /* directory level */ 1627 bool readdir_ra; /* readahead inode in readdir */ 1628 u64 max_io_bytes; /* max io bytes to merge IOs */ 1629 1630 block_t user_block_count; /* # of user blocks */ 1631 block_t total_valid_block_count; /* # of valid blocks */ 1632 block_t discard_blks; /* discard command candidates */ 1633 block_t last_valid_block_count; /* for recovery */ 1634 block_t reserved_blocks; /* configurable reserved blocks */ 1635 block_t current_reserved_blocks; /* current reserved blocks */ 1636 1637 /* Additional tracking for no checkpoint mode */ 1638 block_t unusable_block_count; /* # of blocks saved by last cp */ 1639 1640 unsigned int nquota_files; /* # of quota sysfiles */ 1641 struct f2fs_rwsem quota_sem; /* blocking cp for flags */ 1642 1643 /* # of pages, see count_type */ 1644 atomic_t nr_pages[NR_COUNT_TYPE]; 1645 /* # of allocated blocks */ 1646 struct percpu_counter alloc_valid_block_count; 1647 /* # of node block writes as roll forward recovery */ 1648 struct percpu_counter rf_node_block_count; 1649 1650 /* writeback control */ 1651 atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */ 1652 1653 /* valid inode count */ 1654 struct percpu_counter total_valid_inode_count; 1655 1656 struct f2fs_mount_info mount_opt; /* mount options */ 1657 1658 /* for cleaning operations */ 1659 struct f2fs_rwsem gc_lock; /* 1660 * semaphore for GC, avoid 1661 * race between GC and GC or CP 1662 */ 1663 struct f2fs_gc_kthread *gc_thread; /* GC thread */ 1664 struct atgc_management am; /* atgc management */ 1665 unsigned int cur_victim_sec; /* current victim section num */ 1666 unsigned int gc_mode; /* current GC state */ 1667 unsigned int next_victim_seg[2]; /* next segment in victim section */ 1668 spinlock_t gc_remaining_trials_lock; 1669 /* remaining trial count for GC_URGENT_* and GC_IDLE_* */ 1670 unsigned int gc_remaining_trials; 1671 1672 /* for skip statistics */ 1673 unsigned long long skipped_gc_rwsem; /* FG_GC only */ 1674 1675 /* threshold for gc trials on pinned files */ 1676 unsigned short gc_pin_file_threshold; 1677 struct f2fs_rwsem pin_sem; 1678 1679 /* maximum # of trials to find a victim segment for SSR and GC */ 1680 unsigned int max_victim_search; 1681 /* migration granularity of garbage collection, unit: segment */ 1682 unsigned int migration_granularity; 1683 1684 /* 1685 * for stat information. 1686 * one is for the LFS mode, and the other is for the SSR mode.
1687 */ 1688 #ifdef CONFIG_F2FS_STAT_FS 1689 struct f2fs_stat_info *stat_info; /* FS status information */ 1690 atomic_t meta_count[META_MAX]; /* # of meta blocks */ 1691 unsigned int segment_count[2]; /* # of allocated segments */ 1692 unsigned int block_count[2]; /* # of allocated blocks */ 1693 atomic_t inplace_count; /* # of inplace update */ 1694 /* # of lookup extent cache */ 1695 atomic64_t total_hit_ext[NR_EXTENT_CACHES]; 1696 /* # of hit rbtree extent node */ 1697 atomic64_t read_hit_rbtree[NR_EXTENT_CACHES]; 1698 /* # of hit cached extent node */ 1699 atomic64_t read_hit_cached[NR_EXTENT_CACHES]; 1700 /* # of hit largest extent node in read extent cache */ 1701 atomic64_t read_hit_largest; 1702 atomic_t inline_xattr; /* # of inline_xattr inodes */ 1703 atomic_t inline_inode; /* # of inline_data inodes */ 1704 atomic_t inline_dir; /* # of inline_dentry inodes */ 1705 atomic_t compr_inode; /* # of compressed inodes */ 1706 atomic64_t compr_blocks; /* # of compressed blocks */ 1707 atomic_t swapfile_inode; /* # of swapfile inodes */ 1708 atomic_t atomic_files; /* # of opened atomic files */ 1709 atomic_t max_aw_cnt; /* max # of atomic writes */ 1710 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */ 1711 unsigned int other_skip_bggc; /* skip background gc for other reasons */ 1712 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */ 1713 atomic_t cp_call_count[MAX_CALL_TYPE]; /* # of cp calls */ 1714 #endif 1715 spinlock_t stat_lock; /* lock for stat operations */ 1716 1717 /* to attach REQ_META|REQ_FUA flags */ 1718 unsigned int data_io_flag; 1719 unsigned int node_io_flag; 1720 1721 /* For sysfs support */ 1722 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */ 1723 struct completion s_kobj_unregister; 1724 1725 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */ 1726 struct completion s_stat_kobj_unregister; 1727 1728 struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */ 1729 struct completion s_feature_list_kobj_unregister; 1730 1731 /* For shrinker support */ 1732 struct list_head s_list; 1733 struct mutex umount_mutex; 1734 unsigned int shrinker_run_no; 1735 1736 /* For multi devices */ 1737 int s_ndevs; /* number of devices */ 1738 struct f2fs_dev_info *devs; /* for device list */ 1739 unsigned int dirty_device; /* for checkpoint data flush */ 1740 spinlock_t dev_lock; /* protect dirty_device */ 1741 bool aligned_blksize; /* all devices have the same logical blksize */ 1742 1743 /* For write statistics */ 1744 u64 sectors_written_start; 1745 u64 kbytes_written; 1746 1747 /* Reference to checksum algorithm driver via cryptoapi */ 1748 struct crypto_shash *s_chksum_driver; 1749 1750 /* Precomputed FS UUID checksum for seeding other checksums */ 1751 __u32 s_chksum_seed; 1752 1753 struct workqueue_struct *post_read_wq; /* post read workqueue */ 1754 1755 /* 1756 * If we are in irq context, let's update error information into 1757 * on-disk superblock in the work.
1758 */ 1759 struct work_struct s_error_work; 1760 unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */ 1761 unsigned char stop_reason[MAX_STOP_REASON]; /* stop reason */ 1762 spinlock_t error_lock; /* protect errors/stop_reason arrays */ 1763 bool error_dirty; /* errors of sb are dirty */ 1764 1765 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ 1766 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ 1767 1768 /* For reclaimed segs statistics for each GC mode */ 1769 unsigned int gc_segment_mode; /* GC state for reclaimed segments */ 1770 unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */ 1771 1772 unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */ 1773 1774 int max_fragment_chunk; /* max chunk size for block fragmentation mode */ 1775 int max_fragment_hole; /* max hole size for block fragmentation mode */ 1776 1777 /* For atomic write statistics */ 1778 atomic64_t current_atomic_write; 1779 s64 peak_atomic_write; 1780 u64 committed_atomic_block; 1781 u64 revoked_atomic_block; 1782 1783 #ifdef CONFIG_F2FS_FS_COMPRESSION 1784 struct kmem_cache *page_array_slab; /* page array entry */ 1785 unsigned int page_array_slab_size; /* default page array slab size */ 1786 1787 /* For runtime compression statistics */ 1788 u64 compr_written_block; 1789 u64 compr_saved_block; 1790 u32 compr_new_inode; 1791 1792 /* For compressed block cache */ 1793 struct inode *compress_inode; /* cache compressed blocks */ 1794 unsigned int compress_percent; /* cache page percentage */ 1795 unsigned int compress_watermark; /* cache page watermark */ 1796 atomic_t compress_page_hit; /* cache hit count */ 1797 #endif 1798 1799 #ifdef CONFIG_F2FS_IOSTAT 1800 /* For app/fs IO statistics */ 1801 spinlock_t iostat_lock; 1802 unsigned long long iostat_count[NR_IO_TYPE]; 1803 unsigned long long iostat_bytes[NR_IO_TYPE]; 1804 unsigned long long prev_iostat_bytes[NR_IO_TYPE]; 1805 bool iostat_enable; 1806 unsigned long iostat_next_period; 1807 unsigned int iostat_period_ms; 1808 1809 /* For io latency related statistics info in one iostat period */ 1810 spinlock_t iostat_lat_lock; 1811 struct iostat_lat_info *iostat_io_lat; 1812 #endif 1813 }; 1814 1815 /* Definitions to access f2fs_sb_info */ 1816 #define SEGS_TO_BLKS(sbi, segs) \ 1817 ((segs) << (sbi)->log_blocks_per_seg) 1818 #define BLKS_TO_SEGS(sbi, blks) \ 1819 ((blks) >> (sbi)->log_blocks_per_seg) 1820 1821 #define BLKS_PER_SEG(sbi) ((sbi)->blocks_per_seg) 1822 #define BLKS_PER_SEC(sbi) (SEGS_TO_BLKS(sbi, (sbi)->segs_per_sec)) 1823 #define SEGS_PER_SEC(sbi) ((sbi)->segs_per_sec) 1824 1825 __printf(3, 4) 1826 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...); 1827 1828 #define f2fs_err(sbi, fmt, ...) \ 1829 f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__) 1830 #define f2fs_warn(sbi, fmt, ...) \ 1831 f2fs_printk(sbi, false, KERN_WARNING fmt, ##__VA_ARGS__) 1832 #define f2fs_notice(sbi, fmt, ...) \ 1833 f2fs_printk(sbi, false, KERN_NOTICE fmt, ##__VA_ARGS__) 1834 #define f2fs_info(sbi, fmt, ...) \ 1835 f2fs_printk(sbi, false, KERN_INFO fmt, ##__VA_ARGS__) 1836 #define f2fs_debug(sbi, fmt, ...) \ 1837 f2fs_printk(sbi, false, KERN_DEBUG fmt, ##__VA_ARGS__) 1838 1839 #define f2fs_err_ratelimited(sbi, fmt, ...) \ 1840 f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__) 1841 #define f2fs_warn_ratelimited(sbi, fmt, ...) \ 1842 f2fs_printk(sbi, true, KERN_WARNING fmt, ##__VA_ARGS__) 1843 #define f2fs_info_ratelimited(sbi, fmt, ...)
\ 1844 f2fs_printk(sbi, true, KERN_INFO fmt, ##__VA_ARGS__) 1845 1846 #ifdef CONFIG_F2FS_FAULT_INJECTION 1847 #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \ 1848 __builtin_return_address(0)) 1849 static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type, 1850 const char *func, const char *parent_func) 1851 { 1852 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; 1853 1854 if (!ffi->inject_rate) 1855 return false; 1856 1857 if (!IS_FAULT_SET(ffi, type)) 1858 return false; 1859 1860 atomic_inc(&ffi->inject_ops); 1861 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { 1862 atomic_set(&ffi->inject_ops, 0); 1863 f2fs_info_ratelimited(sbi, "inject %s in %s of %pS", 1864 f2fs_fault_name[type], func, parent_func); 1865 return true; 1866 } 1867 return false; 1868 } 1869 #else 1870 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1871 { 1872 return false; 1873 } 1874 #endif 1875 1876 /* 1877 * Test if the mounted volume is a multi-device volume. 1878 * - For a single regular disk volume, sbi->s_ndevs is 0. 1879 * - For a single zoned disk volume, sbi->s_ndevs is 1. 1880 * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 1881 */ 1882 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) 1883 { 1884 return sbi->s_ndevs > 1; 1885 } 1886 1887 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) 1888 { 1889 unsigned long now = jiffies; 1890 1891 sbi->last_time[type] = now; 1892 1893 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ 1894 if (type == REQ_TIME) { 1895 sbi->last_time[DISCARD_TIME] = now; 1896 sbi->last_time[GC_TIME] = now; 1897 } 1898 } 1899 1900 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) 1901 { 1902 unsigned long interval = sbi->interval_time[type] * HZ; 1903 1904 return time_after(jiffies, sbi->last_time[type] + interval); 1905 } 1906 1907 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, 1908 int type) 1909 { 1910 unsigned long interval = sbi->interval_time[type] * HZ; 1911 unsigned int wait_ms = 0; 1912 long delta; 1913 1914 delta = (sbi->last_time[type] + interval) - jiffies; 1915 if (delta > 0) 1916 wait_ms = jiffies_to_msecs(delta); 1917 1918 return wait_ms; 1919 } 1920 1921 /* 1922 * Inline functions 1923 */ 1924 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, 1925 const void *address, unsigned int length) 1926 { 1927 struct { 1928 struct shash_desc shash; 1929 char ctx[4]; 1930 } desc; 1931 int err; 1932 1933 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1934 1935 desc.shash.tfm = sbi->s_chksum_driver; 1936 *(u32 *)desc.ctx = crc; 1937 1938 err = crypto_shash_update(&desc.shash, address, length); 1939 BUG_ON(err); 1940 1941 return *(u32 *)desc.ctx; 1942 } 1943 1944 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, 1945 unsigned int length) 1946 { 1947 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); 1948 } 1949 1950 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1951 void *buf, size_t buf_size) 1952 { 1953 return f2fs_crc32(sbi, buf, buf_size) == blk_crc; 1954 } 1955 1956 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, 1957 const void *address, unsigned int length) 1958 { 1959 return __f2fs_crc32(sbi, crc, address, length); 1960 } 1961 1962 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 1963 { 1964 return container_of(inode, struct f2fs_inode_info, vfs_inode); 1965 } 
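/*
 * Illustrative sketch (not part of the original header): how the interval
 * helpers above are typically paired.  Foreground paths record activity with
 * f2fs_update_time(), while background work checks f2fs_time_over() to see
 * whether the configured interval has elapsed, or f2fs_time_to_wait() to
 * learn how much longer to sleep.  The example_* helpers are hypothetical.
 */
#if 0	/* example only */
static void example_note_request(struct f2fs_sb_info *sbi)
{
	/* a request just happened; restart the idle clock */
	f2fs_update_time(sbi, REQ_TIME);
}

static unsigned int example_idle_wait_ms(struct f2fs_sb_info *sbi)
{
	/* 0 means "idle long enough, go ahead"; otherwise sleep this long */
	if (f2fs_time_over(sbi, REQ_TIME))
		return 0;
	return f2fs_time_to_wait(sbi, REQ_TIME);
}
#endif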
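/*
 * Illustrative sketch (not part of the original header): typical use of the
 * checksum helpers above.  f2fs_crc32() seeds the CRC with F2FS_SUPER_MAGIC,
 * f2fs_chksum() continues from a caller-provided seed (e.g. s_chksum_seed),
 * and f2fs_crc_valid() compares a freshly computed CRC against an on-disk
 * value.  struct example_disk_blob and the example_* helpers are made up and
 * only demonstrate the calling pattern.
 */
#if 0	/* example only */
struct example_disk_blob {
	__le32	payload[255];
	__le32	chksum;			/* CRC over payload[], stored last */
};

static void example_blob_seal(struct f2fs_sb_info *sbi,
					struct example_disk_blob *blob)
{
	/* checksum everything that precedes the on-disk checksum field */
	u32 crc = f2fs_crc32(sbi, blob,
			offsetof(struct example_disk_blob, chksum));

	blob->chksum = cpu_to_le32(crc);
}

static bool example_blob_verify(struct f2fs_sb_info *sbi,
					struct example_disk_blob *blob)
{
	return f2fs_crc_valid(sbi, le32_to_cpu(blob->chksum), blob,
			offsetof(struct example_disk_blob, chksum));
}
#endif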
1966 1967 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 1968 { 1969 return sb->s_fs_info; 1970 } 1971 1972 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) 1973 { 1974 return F2FS_SB(inode->i_sb); 1975 } 1976 1977 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) 1978 { 1979 return F2FS_I_SB(mapping->host); 1980 } 1981 1982 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1983 { 1984 return F2FS_M_SB(page_file_mapping(page)); 1985 } 1986 1987 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1988 { 1989 return (struct f2fs_super_block *)(sbi->raw_super); 1990 } 1991 1992 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 1993 { 1994 return (struct f2fs_checkpoint *)(sbi->ckpt); 1995 } 1996 1997 static inline struct f2fs_node *F2FS_NODE(struct page *page) 1998 { 1999 return (struct f2fs_node *)page_address(page); 2000 } 2001 2002 static inline struct f2fs_inode *F2FS_INODE(struct page *page) 2003 { 2004 return &((struct f2fs_node *)page_address(page))->i; 2005 } 2006 2007 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) 2008 { 2009 return (struct f2fs_nm_info *)(sbi->nm_info); 2010 } 2011 2012 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) 2013 { 2014 return (struct f2fs_sm_info *)(sbi->sm_info); 2015 } 2016 2017 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) 2018 { 2019 return (struct sit_info *)(SM_I(sbi)->sit_info); 2020 } 2021 2022 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) 2023 { 2024 return (struct free_segmap_info *)(SM_I(sbi)->free_info); 2025 } 2026 2027 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) 2028 { 2029 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); 2030 } 2031 2032 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) 2033 { 2034 return sbi->meta_inode->i_mapping; 2035 } 2036 2037 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) 2038 { 2039 return sbi->node_inode->i_mapping; 2040 } 2041 2042 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) 2043 { 2044 return test_bit(type, &sbi->s_flag); 2045 } 2046 2047 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 2048 { 2049 set_bit(type, &sbi->s_flag); 2050 } 2051 2052 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 2053 { 2054 clear_bit(type, &sbi->s_flag); 2055 } 2056 2057 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) 2058 { 2059 return le64_to_cpu(cp->checkpoint_ver); 2060 } 2061 2062 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) 2063 { 2064 if (type < F2FS_MAX_QUOTAS) 2065 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); 2066 return 0; 2067 } 2068 2069 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) 2070 { 2071 size_t crc_offset = le32_to_cpu(cp->checksum_offset); 2072 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); 2073 } 2074 2075 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2076 { 2077 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2078 2079 return ckpt_flags & f; 2080 } 2081 2082 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2083 { 2084 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); 2085 } 2086 2087 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, 
unsigned int f) 2088 { 2089 unsigned int ckpt_flags; 2090 2091 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2092 ckpt_flags |= f; 2093 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2094 } 2095 2096 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2097 { 2098 unsigned long flags; 2099 2100 spin_lock_irqsave(&sbi->cp_lock, flags); 2101 __set_ckpt_flags(F2FS_CKPT(sbi), f); 2102 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2103 } 2104 2105 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2106 { 2107 unsigned int ckpt_flags; 2108 2109 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2110 ckpt_flags &= (~f); 2111 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2112 } 2113 2114 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2115 { 2116 unsigned long flags; 2117 2118 spin_lock_irqsave(&sbi->cp_lock, flags); 2119 __clear_ckpt_flags(F2FS_CKPT(sbi), f); 2120 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2121 } 2122 2123 #define init_f2fs_rwsem(sem) \ 2124 do { \ 2125 static struct lock_class_key __key; \ 2126 \ 2127 __init_f2fs_rwsem((sem), #sem, &__key); \ 2128 } while (0) 2129 2130 static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem, 2131 const char *sem_name, struct lock_class_key *key) 2132 { 2133 __init_rwsem(&sem->internal_rwsem, sem_name, key); 2134 #ifdef CONFIG_F2FS_UNFAIR_RWSEM 2135 init_waitqueue_head(&sem->read_waiters); 2136 #endif 2137 } 2138 2139 static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem) 2140 { 2141 return rwsem_is_locked(&sem->internal_rwsem); 2142 } 2143 2144 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem) 2145 { 2146 return rwsem_is_contended(&sem->internal_rwsem); 2147 } 2148 2149 static inline void f2fs_down_read(struct f2fs_rwsem *sem) 2150 { 2151 #ifdef CONFIG_F2FS_UNFAIR_RWSEM 2152 wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem)); 2153 #else 2154 down_read(&sem->internal_rwsem); 2155 #endif 2156 } 2157 2158 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem) 2159 { 2160 return down_read_trylock(&sem->internal_rwsem); 2161 } 2162 2163 static inline void f2fs_up_read(struct f2fs_rwsem *sem) 2164 { 2165 up_read(&sem->internal_rwsem); 2166 } 2167 2168 static inline void f2fs_down_write(struct f2fs_rwsem *sem) 2169 { 2170 down_write(&sem->internal_rwsem); 2171 } 2172 2173 #ifdef CONFIG_DEBUG_LOCK_ALLOC 2174 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass) 2175 { 2176 down_read_nested(&sem->internal_rwsem, subclass); 2177 } 2178 2179 static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass) 2180 { 2181 down_write_nested(&sem->internal_rwsem, subclass); 2182 } 2183 #else 2184 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem) 2185 #define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem) 2186 #endif 2187 2188 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem) 2189 { 2190 return down_write_trylock(&sem->internal_rwsem); 2191 } 2192 2193 static inline void f2fs_up_write(struct f2fs_rwsem *sem) 2194 { 2195 up_write(&sem->internal_rwsem); 2196 #ifdef CONFIG_F2FS_UNFAIR_RWSEM 2197 wake_up_all(&sem->read_waiters); 2198 #endif 2199 } 2200 2201 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 2202 { 2203 f2fs_down_read(&sbi->cp_rwsem); 2204 } 2205 2206 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) 2207 { 2208 if (time_to_inject(sbi, FAULT_LOCK_OP)) 2209 return 0; 2210 return f2fs_down_read_trylock(&sbi->cp_rwsem); 2211 } 2212 2213 
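/*
 * Illustrative sketch (not part of the original header): the checkpoint flag
 * helpers above serialize updates to the in-memory f2fs_checkpoint flags with
 * cp_lock, using the irq-safe spinlock variants.  CP_ERROR_FLAG is one of the
 * CP_*_FLAG bits defined in the shared f2fs on-disk header (it is also what
 * f2fs_cp_error() tests later in this file); example_mark_cp_error() is
 * hypothetical.
 */
#if 0	/* example only */
static void example_mark_cp_error(struct f2fs_sb_info *sbi, bool error)
{
	if (error)
		set_ckpt_flags(sbi, CP_ERROR_FLAG);
	else
		clear_ckpt_flags(sbi, CP_ERROR_FLAG);
}
#endif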
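/*
 * Illustrative sketch (not part of the original header): how the cp_rwsem
 * wrappers pair up.  Paths that allocate blocks or dirty node pages hold the
 * semaphore shared via f2fs_lock_op() above and release it with
 * f2fs_unlock_op() just below, while checkpoint excludes them all by taking
 * it exclusively via f2fs_lock_all().  f2fs_trylock_op() is the non-blocking
 * variant and doubles as a FAULT_LOCK_OP injection point.
 * example_alloc_under_cp_lock() is hypothetical and only shows the pattern.
 */
#if 0	/* example only */
static int example_alloc_under_cp_lock(struct f2fs_sb_info *sbi, bool nowait)
{
	if (nowait) {
		if (!f2fs_trylock_op(sbi))
			return -EAGAIN;	/* checkpoint is running (or fault injected) */
	} else {
		f2fs_lock_op(sbi);
	}

	/* ... allocate blocks / update node pages here ... */

	f2fs_unlock_op(sbi);
	return 0;
}
#endif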
static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 2214 { 2215 f2fs_up_read(&sbi->cp_rwsem); 2216 } 2217 2218 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 2219 { 2220 f2fs_down_write(&sbi->cp_rwsem); 2221 } 2222 2223 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 2224 { 2225 f2fs_up_write(&sbi->cp_rwsem); 2226 } 2227 2228 static inline int __get_cp_reason(struct f2fs_sb_info *sbi) 2229 { 2230 int reason = CP_SYNC; 2231 2232 if (test_opt(sbi, FASTBOOT)) 2233 reason = CP_FASTBOOT; 2234 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) 2235 reason = CP_UMOUNT; 2236 return reason; 2237 } 2238 2239 static inline bool __remain_node_summaries(int reason) 2240 { 2241 return (reason & (CP_UMOUNT | CP_FASTBOOT)); 2242 } 2243 2244 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) 2245 { 2246 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || 2247 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); 2248 } 2249 2250 /* 2251 * Check whether the inode has blocks or not 2252 */ 2253 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 2254 { 2255 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; 2256 2257 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; 2258 } 2259 2260 static inline bool f2fs_has_xattr_block(unsigned int ofs) 2261 { 2262 return ofs == XATTR_NODE_OFFSET; 2263 } 2264 2265 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, 2266 struct inode *inode, bool cap) 2267 { 2268 if (!inode) 2269 return true; 2270 if (!test_opt(sbi, RESERVE_ROOT)) 2271 return false; 2272 if (IS_NOQUOTA(inode)) 2273 return true; 2274 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) 2275 return true; 2276 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && 2277 in_group_p(F2FS_OPTION(sbi).s_resgid)) 2278 return true; 2279 if (cap && capable(CAP_SYS_RESOURCE)) 2280 return true; 2281 return false; 2282 } 2283 2284 static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi, 2285 struct inode *inode, bool cap) 2286 { 2287 block_t avail_user_block_count; 2288 2289 avail_user_block_count = sbi->user_block_count - 2290 sbi->current_reserved_blocks; 2291 2292 if (!__allow_reserved_blocks(sbi, inode, cap)) 2293 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 2294 2295 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2296 if (avail_user_block_count > sbi->unusable_block_count) 2297 avail_user_block_count -= sbi->unusable_block_count; 2298 else 2299 avail_user_block_count = 0; 2300 } 2301 2302 return avail_user_block_count; 2303 } 2304 2305 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); 2306 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, 2307 struct inode *inode, blkcnt_t *count, bool partial) 2308 { 2309 long long diff = 0, release = 0; 2310 block_t avail_user_block_count; 2311 int ret; 2312 2313 ret = dquot_reserve_block(inode, *count); 2314 if (ret) 2315 return ret; 2316 2317 if (time_to_inject(sbi, FAULT_BLOCK)) { 2318 release = *count; 2319 goto release_quota; 2320 } 2321 2322 /* 2323 * let's increase this in prior to actual block count change in order 2324 * for f2fs_sync_file to avoid data races when deciding checkpoint. 
2325 */ 2326 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); 2327 2328 spin_lock(&sbi->stat_lock); 2329 2330 avail_user_block_count = get_available_block_count(sbi, inode, true); 2331 diff = (long long)sbi->total_valid_block_count + *count - 2332 avail_user_block_count; 2333 if (unlikely(diff > 0)) { 2334 if (!partial) { 2335 spin_unlock(&sbi->stat_lock); 2336 release = *count; 2337 goto enospc; 2338 } 2339 if (diff > *count) 2340 diff = *count; 2341 *count -= diff; 2342 release = diff; 2343 if (!*count) { 2344 spin_unlock(&sbi->stat_lock); 2345 goto enospc; 2346 } 2347 } 2348 sbi->total_valid_block_count += (block_t)(*count); 2349 2350 spin_unlock(&sbi->stat_lock); 2351 2352 if (unlikely(release)) { 2353 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2354 dquot_release_reservation_block(inode, release); 2355 } 2356 f2fs_i_blocks_write(inode, *count, true, true); 2357 return 0; 2358 2359 enospc: 2360 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2361 release_quota: 2362 dquot_release_reservation_block(inode, release); 2363 return -ENOSPC; 2364 } 2365 2366 #define PAGE_PRIVATE_GET_FUNC(name, flagname) \ 2367 static inline bool page_private_##name(struct page *page) \ 2368 { \ 2369 return PagePrivate(page) && \ 2370 test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \ 2371 test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ 2372 } 2373 2374 #define PAGE_PRIVATE_SET_FUNC(name, flagname) \ 2375 static inline void set_page_private_##name(struct page *page) \ 2376 { \ 2377 if (!PagePrivate(page)) \ 2378 attach_page_private(page, (void *)0); \ 2379 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \ 2380 set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ 2381 } 2382 2383 #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \ 2384 static inline void clear_page_private_##name(struct page *page) \ 2385 { \ 2386 clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ 2387 if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \ 2388 detach_page_private(page); \ 2389 } 2390 2391 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER); 2392 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE); 2393 PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION); 2394 2395 PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE); 2396 PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE); 2397 PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION); 2398 2399 PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE); 2400 PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE); 2401 PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION); 2402 2403 static inline unsigned long get_page_private_data(struct page *page) 2404 { 2405 unsigned long data = page_private(page); 2406 2407 if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data)) 2408 return 0; 2409 return data >> PAGE_PRIVATE_MAX; 2410 } 2411 2412 static inline void set_page_private_data(struct page *page, unsigned long data) 2413 { 2414 if (!PagePrivate(page)) 2415 attach_page_private(page, (void *)0); 2416 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); 2417 page_private(page) |= data << PAGE_PRIVATE_MAX; 2418 } 2419 2420 static inline void clear_page_private_data(struct page *page) 2421 { 2422 page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0); 2423 if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) 2424 detach_page_private(page); 2425 } 2426 2427 static inline void clear_page_private_all(struct page *page) 2428 { 2429 clear_page_private_data(page); 2430 clear_page_private_reference(page); 2431 clear_page_private_gcing(page); 2432 
clear_page_private_inline(page); 2433 2434 f2fs_bug_on(F2FS_P_SB(page), page_private(page)); 2435 } 2436 2437 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2438 struct inode *inode, 2439 block_t count) 2440 { 2441 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2442 2443 spin_lock(&sbi->stat_lock); 2444 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2445 sbi->total_valid_block_count -= (block_t)count; 2446 if (sbi->reserved_blocks && 2447 sbi->current_reserved_blocks < sbi->reserved_blocks) 2448 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2449 sbi->current_reserved_blocks + count); 2450 spin_unlock(&sbi->stat_lock); 2451 if (unlikely(inode->i_blocks < sectors)) { 2452 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2453 inode->i_ino, 2454 (unsigned long long)inode->i_blocks, 2455 (unsigned long long)sectors); 2456 set_sbi_flag(sbi, SBI_NEED_FSCK); 2457 return; 2458 } 2459 f2fs_i_blocks_write(inode, count, false, true); 2460 } 2461 2462 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2463 { 2464 atomic_inc(&sbi->nr_pages[count_type]); 2465 2466 if (count_type == F2FS_DIRTY_DENTS || 2467 count_type == F2FS_DIRTY_NODES || 2468 count_type == F2FS_DIRTY_META || 2469 count_type == F2FS_DIRTY_QDATA || 2470 count_type == F2FS_DIRTY_IMETA) 2471 set_sbi_flag(sbi, SBI_IS_DIRTY); 2472 } 2473 2474 static inline void inode_inc_dirty_pages(struct inode *inode) 2475 { 2476 atomic_inc(&F2FS_I(inode)->dirty_pages); 2477 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2478 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2479 if (IS_NOQUOTA(inode)) 2480 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2481 } 2482 2483 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2484 { 2485 atomic_dec(&sbi->nr_pages[count_type]); 2486 } 2487 2488 static inline void inode_dec_dirty_pages(struct inode *inode) 2489 { 2490 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2491 !S_ISLNK(inode->i_mode)) 2492 return; 2493 2494 atomic_dec(&F2FS_I(inode)->dirty_pages); 2495 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
2496 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2497 if (IS_NOQUOTA(inode)) 2498 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2499 } 2500 2501 static inline void inc_atomic_write_cnt(struct inode *inode) 2502 { 2503 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2504 struct f2fs_inode_info *fi = F2FS_I(inode); 2505 u64 current_write; 2506 2507 fi->atomic_write_cnt++; 2508 atomic64_inc(&sbi->current_atomic_write); 2509 current_write = atomic64_read(&sbi->current_atomic_write); 2510 if (current_write > sbi->peak_atomic_write) 2511 sbi->peak_atomic_write = current_write; 2512 } 2513 2514 static inline void release_atomic_write_cnt(struct inode *inode) 2515 { 2516 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2517 struct f2fs_inode_info *fi = F2FS_I(inode); 2518 2519 atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write); 2520 fi->atomic_write_cnt = 0; 2521 } 2522 2523 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2524 { 2525 return atomic_read(&sbi->nr_pages[count_type]); 2526 } 2527 2528 static inline int get_dirty_pages(struct inode *inode) 2529 { 2530 return atomic_read(&F2FS_I(inode)->dirty_pages); 2531 } 2532 2533 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2534 { 2535 return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1, 2536 BLKS_PER_SEC(sbi)); 2537 } 2538 2539 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2540 { 2541 return sbi->total_valid_block_count; 2542 } 2543 2544 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2545 { 2546 return sbi->discard_blks; 2547 } 2548 2549 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2550 { 2551 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2552 2553 /* return NAT or SIT bitmap */ 2554 if (flag == NAT_BITMAP) 2555 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2556 else if (flag == SIT_BITMAP) 2557 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2558 2559 return 0; 2560 } 2561 2562 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2563 { 2564 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2565 } 2566 2567 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2568 { 2569 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2570 void *tmp_ptr = &ckpt->sit_nat_version_bitmap; 2571 int offset; 2572 2573 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2574 offset = (flag == SIT_BITMAP) ? 2575 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2576 /* 2577 * if large_nat_bitmap feature is enabled, leave checksum 2578 * protection for all nat/sit bitmaps. 2579 */ 2580 return tmp_ptr + offset + sizeof(__le32); 2581 } 2582 2583 if (__cp_payload(sbi) > 0) { 2584 if (flag == NAT_BITMAP) 2585 return tmp_ptr; 2586 else 2587 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2588 } else { 2589 offset = (flag == NAT_BITMAP) ? 
2590 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2591 return tmp_ptr + offset; 2592 } 2593 } 2594 2595 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2596 { 2597 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2598 2599 if (sbi->cur_cp_pack == 2) 2600 start_addr += BLKS_PER_SEG(sbi); 2601 return start_addr; 2602 } 2603 2604 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2605 { 2606 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2607 2608 if (sbi->cur_cp_pack == 1) 2609 start_addr += BLKS_PER_SEG(sbi); 2610 return start_addr; 2611 } 2612 2613 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2614 { 2615 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1; 2616 } 2617 2618 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2619 { 2620 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2621 } 2622 2623 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync); 2624 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2625 struct inode *inode, bool is_inode) 2626 { 2627 block_t valid_block_count; 2628 unsigned int valid_node_count; 2629 unsigned int avail_user_block_count; 2630 int err; 2631 2632 if (is_inode) { 2633 if (inode) { 2634 err = dquot_alloc_inode(inode); 2635 if (err) 2636 return err; 2637 } 2638 } else { 2639 err = dquot_reserve_block(inode, 1); 2640 if (err) 2641 return err; 2642 } 2643 2644 if (time_to_inject(sbi, FAULT_BLOCK)) 2645 goto enospc; 2646 2647 spin_lock(&sbi->stat_lock); 2648 2649 valid_block_count = sbi->total_valid_block_count + 1; 2650 avail_user_block_count = get_available_block_count(sbi, inode, false); 2651 2652 if (unlikely(valid_block_count > avail_user_block_count)) { 2653 spin_unlock(&sbi->stat_lock); 2654 goto enospc; 2655 } 2656 2657 valid_node_count = sbi->total_valid_node_count + 1; 2658 if (unlikely(valid_node_count > sbi->total_node_count)) { 2659 spin_unlock(&sbi->stat_lock); 2660 goto enospc; 2661 } 2662 2663 sbi->total_valid_node_count++; 2664 sbi->total_valid_block_count++; 2665 spin_unlock(&sbi->stat_lock); 2666 2667 if (inode) { 2668 if (is_inode) 2669 f2fs_mark_inode_dirty_sync(inode, true); 2670 else 2671 f2fs_i_blocks_write(inode, 1, true, true); 2672 } 2673 2674 percpu_counter_inc(&sbi->alloc_valid_block_count); 2675 return 0; 2676 2677 enospc: 2678 if (is_inode) { 2679 if (inode) 2680 dquot_free_inode(inode); 2681 } else { 2682 dquot_release_reservation_block(inode, 1); 2683 } 2684 return -ENOSPC; 2685 } 2686 2687 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2688 struct inode *inode, bool is_inode) 2689 { 2690 spin_lock(&sbi->stat_lock); 2691 2692 if (unlikely(!sbi->total_valid_block_count || 2693 !sbi->total_valid_node_count)) { 2694 f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u", 2695 sbi->total_valid_block_count, 2696 sbi->total_valid_node_count); 2697 set_sbi_flag(sbi, SBI_NEED_FSCK); 2698 } else { 2699 sbi->total_valid_block_count--; 2700 sbi->total_valid_node_count--; 2701 } 2702 2703 if (sbi->reserved_blocks && 2704 sbi->current_reserved_blocks < sbi->reserved_blocks) 2705 sbi->current_reserved_blocks++; 2706 2707 spin_unlock(&sbi->stat_lock); 2708 2709 if (is_inode) { 2710 dquot_free_inode(inode); 2711 } else { 2712 if (unlikely(inode->i_blocks == 0)) { 2713 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2714 inode->i_ino, 2715 (unsigned long long)inode->i_blocks); 2716 
set_sbi_flag(sbi, SBI_NEED_FSCK); 2717 return; 2718 } 2719 f2fs_i_blocks_write(inode, 1, false, true); 2720 } 2721 } 2722 2723 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2724 { 2725 return sbi->total_valid_node_count; 2726 } 2727 2728 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2729 { 2730 percpu_counter_inc(&sbi->total_valid_inode_count); 2731 } 2732 2733 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2734 { 2735 percpu_counter_dec(&sbi->total_valid_inode_count); 2736 } 2737 2738 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2739 { 2740 return percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2741 } 2742 2743 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 2744 pgoff_t index, bool for_write) 2745 { 2746 struct page *page; 2747 unsigned int flags; 2748 2749 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2750 if (!for_write) 2751 page = find_get_page_flags(mapping, index, 2752 FGP_LOCK | FGP_ACCESSED); 2753 else 2754 page = find_lock_page(mapping, index); 2755 if (page) 2756 return page; 2757 2758 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) 2759 return NULL; 2760 } 2761 2762 if (!for_write) 2763 return grab_cache_page(mapping, index); 2764 2765 flags = memalloc_nofs_save(); 2766 page = grab_cache_page_write_begin(mapping, index); 2767 memalloc_nofs_restore(flags); 2768 2769 return page; 2770 } 2771 2772 static inline struct page *f2fs_pagecache_get_page( 2773 struct address_space *mapping, pgoff_t index, 2774 fgf_t fgp_flags, gfp_t gfp_mask) 2775 { 2776 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) 2777 return NULL; 2778 2779 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2780 } 2781 2782 static inline void f2fs_put_page(struct page *page, int unlock) 2783 { 2784 if (!page) 2785 return; 2786 2787 if (unlock) { 2788 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2789 unlock_page(page); 2790 } 2791 put_page(page); 2792 } 2793 2794 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2795 { 2796 if (dn->node_page) 2797 f2fs_put_page(dn->node_page, 1); 2798 if (dn->inode_page && dn->node_page != dn->inode_page) 2799 f2fs_put_page(dn->inode_page, 0); 2800 dn->node_page = NULL; 2801 dn->inode_page = NULL; 2802 } 2803 2804 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2805 size_t size) 2806 { 2807 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2808 } 2809 2810 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep, 2811 gfp_t flags) 2812 { 2813 void *entry; 2814 2815 entry = kmem_cache_alloc(cachep, flags); 2816 if (!entry) 2817 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2818 return entry; 2819 } 2820 2821 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2822 gfp_t flags, bool nofail, struct f2fs_sb_info *sbi) 2823 { 2824 if (nofail) 2825 return f2fs_kmem_cache_alloc_nofail(cachep, flags); 2826 2827 if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) 2828 return NULL; 2829 2830 return kmem_cache_alloc(cachep, flags); 2831 } 2832 2833 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) 2834 { 2835 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2836 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2837 get_pages(sbi, F2FS_WB_CP_DATA) || 2838 get_pages(sbi, F2FS_DIO_READ) || 2839 get_pages(sbi, F2FS_DIO_WRITE)) 2840 return true; 2841 2842 if (type != DISCARD_TIME && SM_I(sbi) && 
SM_I(sbi)->dcc_info && 2843 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2844 return true; 2845 2846 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2847 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2848 return true; 2849 return false; 2850 } 2851 2852 static inline bool is_idle(struct f2fs_sb_info *sbi, int type) 2853 { 2854 if (sbi->gc_mode == GC_URGENT_HIGH) 2855 return true; 2856 2857 if (is_inflight_io(sbi, type)) 2858 return false; 2859 2860 if (sbi->gc_mode == GC_URGENT_MID) 2861 return true; 2862 2863 if (sbi->gc_mode == GC_URGENT_LOW && 2864 (type == DISCARD_TIME || type == GC_TIME)) 2865 return true; 2866 2867 return f2fs_time_over(sbi, type); 2868 } 2869 2870 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2871 unsigned long index, void *item) 2872 { 2873 while (radix_tree_insert(root, index, item)) 2874 cond_resched(); 2875 } 2876 2877 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 2878 2879 static inline bool IS_INODE(struct page *page) 2880 { 2881 struct f2fs_node *p = F2FS_NODE(page); 2882 2883 return RAW_IS_INODE(p); 2884 } 2885 2886 static inline int offset_in_addr(struct f2fs_inode *i) 2887 { 2888 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2889 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2890 } 2891 2892 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2893 { 2894 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2895 } 2896 2897 static inline int f2fs_has_extra_attr(struct inode *inode); 2898 static inline block_t data_blkaddr(struct inode *inode, 2899 struct page *node_page, unsigned int offset) 2900 { 2901 struct f2fs_node *raw_node; 2902 __le32 *addr_array; 2903 int base = 0; 2904 bool is_inode = IS_INODE(node_page); 2905 2906 raw_node = F2FS_NODE(node_page); 2907 2908 if (is_inode) { 2909 if (!inode) 2910 /* from GC path only */ 2911 base = offset_in_addr(&raw_node->i); 2912 else if (f2fs_has_extra_attr(inode)) 2913 base = get_extra_isize(inode); 2914 } 2915 2916 addr_array = blkaddr_in_node(raw_node); 2917 return le32_to_cpu(addr_array[base + offset]); 2918 } 2919 2920 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2921 { 2922 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2923 } 2924 2925 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2926 { 2927 int mask; 2928 2929 addr += (nr >> 3); 2930 mask = BIT(7 - (nr & 0x07)); 2931 return mask & *addr; 2932 } 2933 2934 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2935 { 2936 int mask; 2937 2938 addr += (nr >> 3); 2939 mask = BIT(7 - (nr & 0x07)); 2940 *addr |= mask; 2941 } 2942 2943 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2944 { 2945 int mask; 2946 2947 addr += (nr >> 3); 2948 mask = BIT(7 - (nr & 0x07)); 2949 *addr &= ~mask; 2950 } 2951 2952 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2953 { 2954 int mask; 2955 int ret; 2956 2957 addr += (nr >> 3); 2958 mask = BIT(7 - (nr & 0x07)); 2959 ret = mask & *addr; 2960 *addr |= mask; 2961 return ret; 2962 } 2963 2964 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2965 { 2966 int mask; 2967 int ret; 2968 2969 addr += (nr >> 3); 2970 mask = BIT(7 - (nr & 0x07)); 2971 ret = mask & *addr; 2972 *addr &= ~mask; 2973 return ret; 2974 } 2975 2976 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2977 { 2978 int mask; 2979 2980 addr += (nr >> 3); 2981 mask = BIT(7 - (nr & 0x07)); 2982 *addr ^= mask; 2983 } 2984 2985 /* 2986 * On-disk inode flags (f2fs_inode::i_flags) 
2987 */ 2988 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2989 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2990 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2991 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2992 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ 2993 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2994 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2995 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2996 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2997 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2998 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2999 3000 #define F2FS_QUOTA_DEFAULT_FL (F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL) 3001 3002 /* Flags that should be inherited by new inodes from their parent. */ 3003 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 3004 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 3005 F2FS_CASEFOLD_FL) 3006 3007 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 3008 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 3009 F2FS_CASEFOLD_FL)) 3010 3011 /* Flags that are appropriate for non-directories/regular files. */ 3012 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 3013 3014 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 3015 { 3016 if (S_ISDIR(mode)) 3017 return flags; 3018 else if (S_ISREG(mode)) 3019 return flags & F2FS_REG_FLMASK; 3020 else 3021 return flags & F2FS_OTHER_FLMASK; 3022 } 3023 3024 static inline void __mark_inode_dirty_flag(struct inode *inode, 3025 int flag, bool set) 3026 { 3027 switch (flag) { 3028 case FI_INLINE_XATTR: 3029 case FI_INLINE_DATA: 3030 case FI_INLINE_DENTRY: 3031 case FI_NEW_INODE: 3032 if (set) 3033 return; 3034 fallthrough; 3035 case FI_DATA_EXIST: 3036 case FI_INLINE_DOTS: 3037 case FI_PIN_FILE: 3038 case FI_COMPRESS_RELEASED: 3039 case FI_ATOMIC_COMMITTED: 3040 f2fs_mark_inode_dirty_sync(inode, true); 3041 } 3042 } 3043 3044 static inline void set_inode_flag(struct inode *inode, int flag) 3045 { 3046 set_bit(flag, F2FS_I(inode)->flags); 3047 __mark_inode_dirty_flag(inode, flag, true); 3048 } 3049 3050 static inline int is_inode_flag_set(struct inode *inode, int flag) 3051 { 3052 return test_bit(flag, F2FS_I(inode)->flags); 3053 } 3054 3055 static inline void clear_inode_flag(struct inode *inode, int flag) 3056 { 3057 clear_bit(flag, F2FS_I(inode)->flags); 3058 __mark_inode_dirty_flag(inode, flag, false); 3059 } 3060 3061 static inline bool f2fs_verity_in_progress(struct inode *inode) 3062 { 3063 return IS_ENABLED(CONFIG_FS_VERITY) && 3064 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); 3065 } 3066 3067 static inline void set_acl_inode(struct inode *inode, umode_t mode) 3068 { 3069 F2FS_I(inode)->i_acl_mode = mode; 3070 set_inode_flag(inode, FI_ACL_MODE); 3071 f2fs_mark_inode_dirty_sync(inode, false); 3072 } 3073 3074 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 3075 { 3076 if (inc) 3077 inc_nlink(inode); 3078 else 3079 drop_nlink(inode); 3080 f2fs_mark_inode_dirty_sync(inode, true); 3081 } 3082 3083 static inline void f2fs_i_blocks_write(struct inode *inode, 3084 block_t diff, bool add, bool claim) 3085 { 3086 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 3087 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 3088 3089 /* add = 1, claim = 1 should be dquot_reserve_block in pair */ 3090 if 
(add) { 3091 if (claim) 3092 dquot_claim_block(inode, diff); 3093 else 3094 dquot_alloc_block_nofail(inode, diff); 3095 } else { 3096 dquot_free_block(inode, diff); 3097 } 3098 3099 f2fs_mark_inode_dirty_sync(inode, true); 3100 if (clean || recover) 3101 set_inode_flag(inode, FI_AUTO_RECOVER); 3102 } 3103 3104 static inline bool f2fs_is_atomic_file(struct inode *inode); 3105 3106 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) 3107 { 3108 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 3109 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 3110 3111 if (i_size_read(inode) == i_size) 3112 return; 3113 3114 i_size_write(inode, i_size); 3115 3116 if (f2fs_is_atomic_file(inode)) 3117 return; 3118 3119 f2fs_mark_inode_dirty_sync(inode, true); 3120 if (clean || recover) 3121 set_inode_flag(inode, FI_AUTO_RECOVER); 3122 } 3123 3124 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 3125 { 3126 F2FS_I(inode)->i_current_depth = depth; 3127 f2fs_mark_inode_dirty_sync(inode, true); 3128 } 3129 3130 static inline void f2fs_i_gc_failures_write(struct inode *inode, 3131 unsigned int count) 3132 { 3133 F2FS_I(inode)->i_gc_failures = count; 3134 f2fs_mark_inode_dirty_sync(inode, true); 3135 } 3136 3137 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 3138 { 3139 F2FS_I(inode)->i_xattr_nid = xnid; 3140 f2fs_mark_inode_dirty_sync(inode, true); 3141 } 3142 3143 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 3144 { 3145 F2FS_I(inode)->i_pino = pino; 3146 f2fs_mark_inode_dirty_sync(inode, true); 3147 } 3148 3149 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 3150 { 3151 struct f2fs_inode_info *fi = F2FS_I(inode); 3152 3153 if (ri->i_inline & F2FS_INLINE_XATTR) 3154 set_bit(FI_INLINE_XATTR, fi->flags); 3155 if (ri->i_inline & F2FS_INLINE_DATA) 3156 set_bit(FI_INLINE_DATA, fi->flags); 3157 if (ri->i_inline & F2FS_INLINE_DENTRY) 3158 set_bit(FI_INLINE_DENTRY, fi->flags); 3159 if (ri->i_inline & F2FS_DATA_EXIST) 3160 set_bit(FI_DATA_EXIST, fi->flags); 3161 if (ri->i_inline & F2FS_INLINE_DOTS) 3162 set_bit(FI_INLINE_DOTS, fi->flags); 3163 if (ri->i_inline & F2FS_EXTRA_ATTR) 3164 set_bit(FI_EXTRA_ATTR, fi->flags); 3165 if (ri->i_inline & F2FS_PIN_FILE) 3166 set_bit(FI_PIN_FILE, fi->flags); 3167 if (ri->i_inline & F2FS_COMPRESS_RELEASED) 3168 set_bit(FI_COMPRESS_RELEASED, fi->flags); 3169 } 3170 3171 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 3172 { 3173 ri->i_inline = 0; 3174 3175 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 3176 ri->i_inline |= F2FS_INLINE_XATTR; 3177 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 3178 ri->i_inline |= F2FS_INLINE_DATA; 3179 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 3180 ri->i_inline |= F2FS_INLINE_DENTRY; 3181 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 3182 ri->i_inline |= F2FS_DATA_EXIST; 3183 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 3184 ri->i_inline |= F2FS_INLINE_DOTS; 3185 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 3186 ri->i_inline |= F2FS_EXTRA_ATTR; 3187 if (is_inode_flag_set(inode, FI_PIN_FILE)) 3188 ri->i_inline |= F2FS_PIN_FILE; 3189 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) 3190 ri->i_inline |= F2FS_COMPRESS_RELEASED; 3191 } 3192 3193 static inline int f2fs_has_extra_attr(struct inode *inode) 3194 { 3195 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 3196 } 3197 3198 static inline int f2fs_has_inline_xattr(struct inode *inode) 3199 { 3200 return 
is_inode_flag_set(inode, FI_INLINE_XATTR); 3201 } 3202 3203 static inline int f2fs_compressed_file(struct inode *inode) 3204 { 3205 return S_ISREG(inode->i_mode) && 3206 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 3207 } 3208 3209 static inline bool f2fs_need_compress_data(struct inode *inode) 3210 { 3211 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; 3212 3213 if (!f2fs_compressed_file(inode)) 3214 return false; 3215 3216 if (compress_mode == COMPR_MODE_FS) 3217 return true; 3218 else if (compress_mode == COMPR_MODE_USER && 3219 is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) 3220 return true; 3221 3222 return false; 3223 } 3224 3225 static inline unsigned int addrs_per_inode(struct inode *inode) 3226 { 3227 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 3228 get_inline_xattr_addrs(inode); 3229 3230 if (!f2fs_compressed_file(inode)) 3231 return addrs; 3232 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 3233 } 3234 3235 static inline unsigned int addrs_per_block(struct inode *inode) 3236 { 3237 if (!f2fs_compressed_file(inode)) 3238 return DEF_ADDRS_PER_BLOCK; 3239 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 3240 } 3241 3242 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 3243 { 3244 struct f2fs_inode *ri = F2FS_INODE(page); 3245 3246 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 3247 get_inline_xattr_addrs(inode)]); 3248 } 3249 3250 static inline int inline_xattr_size(struct inode *inode) 3251 { 3252 if (f2fs_has_inline_xattr(inode)) 3253 return get_inline_xattr_addrs(inode) * sizeof(__le32); 3254 return 0; 3255 } 3256 3257 /* 3258 * Notice: check inline_data flag without inode page lock is unsafe. 3259 * It could change at any time by f2fs_convert_inline_page(). 
3260 */ 3261 static inline int f2fs_has_inline_data(struct inode *inode) 3262 { 3263 return is_inode_flag_set(inode, FI_INLINE_DATA); 3264 } 3265 3266 static inline int f2fs_exist_data(struct inode *inode) 3267 { 3268 return is_inode_flag_set(inode, FI_DATA_EXIST); 3269 } 3270 3271 static inline int f2fs_has_inline_dots(struct inode *inode) 3272 { 3273 return is_inode_flag_set(inode, FI_INLINE_DOTS); 3274 } 3275 3276 static inline int f2fs_is_mmap_file(struct inode *inode) 3277 { 3278 return is_inode_flag_set(inode, FI_MMAP_FILE); 3279 } 3280 3281 static inline bool f2fs_is_pinned_file(struct inode *inode) 3282 { 3283 return is_inode_flag_set(inode, FI_PIN_FILE); 3284 } 3285 3286 static inline bool f2fs_is_atomic_file(struct inode *inode) 3287 { 3288 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 3289 } 3290 3291 static inline bool f2fs_is_cow_file(struct inode *inode) 3292 { 3293 return is_inode_flag_set(inode, FI_COW_FILE); 3294 } 3295 3296 static inline __le32 *get_dnode_addr(struct inode *inode, 3297 struct page *node_page); 3298 static inline void *inline_data_addr(struct inode *inode, struct page *page) 3299 { 3300 __le32 *addr = get_dnode_addr(inode, page); 3301 3302 return (void *)(addr + DEF_INLINE_RESERVED_SIZE); 3303 } 3304 3305 static inline int f2fs_has_inline_dentry(struct inode *inode) 3306 { 3307 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 3308 } 3309 3310 static inline int is_file(struct inode *inode, int type) 3311 { 3312 return F2FS_I(inode)->i_advise & type; 3313 } 3314 3315 static inline void set_file(struct inode *inode, int type) 3316 { 3317 if (is_file(inode, type)) 3318 return; 3319 F2FS_I(inode)->i_advise |= type; 3320 f2fs_mark_inode_dirty_sync(inode, true); 3321 } 3322 3323 static inline void clear_file(struct inode *inode, int type) 3324 { 3325 if (!is_file(inode, type)) 3326 return; 3327 F2FS_I(inode)->i_advise &= ~type; 3328 f2fs_mark_inode_dirty_sync(inode, true); 3329 } 3330 3331 static inline bool f2fs_is_time_consistent(struct inode *inode) 3332 { 3333 struct timespec64 ts = inode_get_atime(inode); 3334 3335 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts)) 3336 return false; 3337 ts = inode_get_ctime(inode); 3338 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts)) 3339 return false; 3340 ts = inode_get_mtime(inode); 3341 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts)) 3342 return false; 3343 return true; 3344 } 3345 3346 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 3347 { 3348 bool ret; 3349 3350 if (dsync) { 3351 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3352 3353 spin_lock(&sbi->inode_lock[DIRTY_META]); 3354 ret = list_empty(&F2FS_I(inode)->gdirty_list); 3355 spin_unlock(&sbi->inode_lock[DIRTY_META]); 3356 return ret; 3357 } 3358 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 3359 file_keep_isize(inode) || 3360 i_size_read(inode) & ~PAGE_MASK) 3361 return false; 3362 3363 if (!f2fs_is_time_consistent(inode)) 3364 return false; 3365 3366 spin_lock(&F2FS_I(inode)->i_size_lock); 3367 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); 3368 spin_unlock(&F2FS_I(inode)->i_size_lock); 3369 3370 return ret; 3371 } 3372 3373 static inline bool f2fs_readonly(struct super_block *sb) 3374 { 3375 return sb_rdonly(sb); 3376 } 3377 3378 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 3379 { 3380 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 3381 } 3382 3383 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 3384 size_t size, gfp_t flags) 3385 { 3386 if (time_to_inject(sbi, 
FAULT_KMALLOC)) 3387 return NULL; 3388 3389 return kmalloc(size, flags); 3390 } 3391 3392 static inline void *f2fs_getname(struct f2fs_sb_info *sbi) 3393 { 3394 if (time_to_inject(sbi, FAULT_KMALLOC)) 3395 return NULL; 3396 3397 return __getname(); 3398 } 3399 3400 static inline void f2fs_putname(char *buf) 3401 { 3402 __putname(buf); 3403 } 3404 3405 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, 3406 size_t size, gfp_t flags) 3407 { 3408 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); 3409 } 3410 3411 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, 3412 size_t size, gfp_t flags) 3413 { 3414 if (time_to_inject(sbi, FAULT_KVMALLOC)) 3415 return NULL; 3416 3417 return kvmalloc(size, flags); 3418 } 3419 3420 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, 3421 size_t size, gfp_t flags) 3422 { 3423 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); 3424 } 3425 3426 static inline int get_extra_isize(struct inode *inode) 3427 { 3428 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 3429 } 3430 3431 static inline int get_inline_xattr_addrs(struct inode *inode) 3432 { 3433 return F2FS_I(inode)->i_inline_xattr_size; 3434 } 3435 3436 static inline __le32 *get_dnode_addr(struct inode *inode, 3437 struct page *node_page) 3438 { 3439 int base = 0; 3440 3441 if (IS_INODE(node_page) && f2fs_has_extra_attr(inode)) 3442 base = get_extra_isize(inode); 3443 3444 return blkaddr_in_node(F2FS_NODE(node_page)) + base; 3445 } 3446 3447 #define f2fs_get_inode_mode(i) \ 3448 ((is_inode_flag_set(i, FI_ACL_MODE)) ? \ 3449 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 3450 3451 #define F2FS_MIN_EXTRA_ATTR_SIZE (sizeof(__le32)) 3452 3453 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 3454 (offsetof(struct f2fs_inode, i_extra_end) - \ 3455 offsetof(struct f2fs_inode, i_extra_isize)) \ 3456 3457 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 3458 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 3459 ((offsetof(typeof(*(f2fs_inode)), field) + \ 3460 sizeof((f2fs_inode)->field)) \ 3461 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ 3462 3463 #define __is_large_section(sbi) (SEGS_PER_SEC(sbi) > 1) 3464 3465 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) 3466 3467 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3468 block_t blkaddr, int type); 3469 static inline void verify_blkaddr(struct f2fs_sb_info *sbi, 3470 block_t blkaddr, int type) 3471 { 3472 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) 3473 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 3474 blkaddr, type); 3475 } 3476 3477 static inline bool __is_valid_data_blkaddr(block_t blkaddr) 3478 { 3479 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || 3480 blkaddr == COMPRESS_ADDR) 3481 return false; 3482 return true; 3483 } 3484 3485 /* 3486 * file.c 3487 */ 3488 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3489 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); 3490 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); 3491 int f2fs_truncate(struct inode *inode); 3492 int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path, 3493 struct kstat *stat, u32 request_mask, unsigned int flags); 3494 int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 3495 struct iattr *attr); 3496 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 3497 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); 3498 int f2fs_do_shutdown(struct 
f2fs_sb_info *sbi, unsigned int flag, 3499 bool readonly); 3500 int f2fs_precache_extents(struct inode *inode); 3501 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); 3502 int f2fs_fileattr_set(struct mnt_idmap *idmap, 3503 struct dentry *dentry, struct fileattr *fa); 3504 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 3505 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3506 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3507 int f2fs_pin_file_control(struct inode *inode, bool inc); 3508 3509 /* 3510 * inode.c 3511 */ 3512 void f2fs_set_inode_flags(struct inode *inode); 3513 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3514 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3515 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3516 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3517 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3518 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3519 void f2fs_update_inode_page(struct inode *inode); 3520 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3521 void f2fs_evict_inode(struct inode *inode); 3522 void f2fs_handle_failed_inode(struct inode *inode); 3523 3524 /* 3525 * namei.c 3526 */ 3527 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3528 bool hot, bool set); 3529 struct dentry *f2fs_get_parent(struct dentry *child); 3530 int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 3531 struct inode **new_inode); 3532 3533 /* 3534 * dir.c 3535 */ 3536 int f2fs_init_casefolded_name(const struct inode *dir, 3537 struct f2fs_filename *fname); 3538 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3539 int lookup, struct f2fs_filename *fname); 3540 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3541 struct f2fs_filename *fname); 3542 void f2fs_free_filename(struct f2fs_filename *fname); 3543 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3544 const struct f2fs_filename *fname, int *max_slots); 3545 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3546 unsigned int start_pos, struct fscrypt_str *fstr); 3547 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3548 struct f2fs_dentry_ptr *d); 3549 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3550 const struct f2fs_filename *fname, struct page *dpage); 3551 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3552 unsigned int current_depth); 3553 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3554 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3555 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3556 const struct f2fs_filename *fname, 3557 struct page **res_page); 3558 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3559 const struct qstr *child, struct page **res_page); 3560 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3561 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3562 struct page **page); 3563 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3564 struct page *page, struct inode *inode); 3565 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3566 const struct f2fs_filename *fname); 3567 
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3568 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3569 unsigned int bit_pos); 3570 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3571 struct inode *inode, nid_t ino, umode_t mode); 3572 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, 3573 struct inode *inode, nid_t ino, umode_t mode); 3574 int f2fs_do_add_link(struct inode *dir, const struct qstr *name, 3575 struct inode *inode, nid_t ino, umode_t mode); 3576 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 3577 struct inode *dir, struct inode *inode); 3578 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir, 3579 struct f2fs_filename *fname); 3580 bool f2fs_empty_dir(struct inode *dir); 3581 3582 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 3583 { 3584 if (fscrypt_is_nokey_name(dentry)) 3585 return -ENOKEY; 3586 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, 3587 inode, inode->i_ino, inode->i_mode); 3588 } 3589 3590 /* 3591 * super.c 3592 */ 3593 int f2fs_inode_dirtied(struct inode *inode, bool sync); 3594 void f2fs_inode_synced(struct inode *inode); 3595 int f2fs_dquot_initialize(struct inode *inode); 3596 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); 3597 int f2fs_quota_sync(struct super_block *sb, int type); 3598 loff_t max_file_blocks(struct inode *inode); 3599 void f2fs_quota_off_umount(struct super_block *sb); 3600 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag); 3601 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason, 3602 bool irq_context); 3603 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error); 3604 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error); 3605 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3606 int f2fs_sync_fs(struct super_block *sb, int sync); 3607 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 3608 3609 /* 3610 * hash.c 3611 */ 3612 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); 3613 3614 /* 3615 * node.c 3616 */ 3617 struct node_info; 3618 3619 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); 3620 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); 3621 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); 3622 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); 3623 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); 3624 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); 3625 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 3626 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 3627 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 3628 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 3629 struct node_info *ni, bool checkpoint_context); 3630 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 3631 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 3632 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); 3633 int f2fs_truncate_xattr_node(struct inode *inode); 3634 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 3635 unsigned int seq_id); 3636 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi); 3637 int f2fs_remove_inode_page(struct inode *inode); 3638 struct page 
*f2fs_new_inode_page(struct inode *inode); 3639 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); 3640 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); 3641 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 3642 struct page *f2fs_get_node_page_ra(struct page *parent, int start); 3643 int f2fs_move_node_page(struct page *node_page, int gc_type); 3644 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); 3645 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 3646 struct writeback_control *wbc, bool atomic, 3647 unsigned int *seq_id); 3648 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 3649 struct writeback_control *wbc, 3650 bool do_balance, enum iostat_type io_type); 3651 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); 3652 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); 3653 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); 3654 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); 3655 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); 3656 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); 3657 int f2fs_recover_xattr_data(struct inode *inode, struct page *page); 3658 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); 3659 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 3660 unsigned int segno, struct f2fs_summary_block *sum); 3661 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi); 3662 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3663 int f2fs_build_node_manager(struct f2fs_sb_info *sbi); 3664 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); 3665 int __init f2fs_create_node_manager_caches(void); 3666 void f2fs_destroy_node_manager_caches(void); 3667 3668 /* 3669 * segment.c 3670 */ 3671 bool f2fs_need_SSR(struct f2fs_sb_info *sbi); 3672 int f2fs_commit_atomic_write(struct inode *inode); 3673 void f2fs_abort_atomic_write(struct inode *inode, bool clean); 3674 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); 3675 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); 3676 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); 3677 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); 3678 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); 3679 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); 3680 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); 3681 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 3682 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi); 3683 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); 3684 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); 3685 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); 3686 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 3687 struct cp_control *cpc); 3688 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); 3689 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); 3690 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); 3691 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); 3692 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 3693 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno); 3694 int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); 3695 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); 3696 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); 3697 
int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3698 unsigned int start, unsigned int end); 3699 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force); 3700 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi); 3701 int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); 3702 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 3703 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3704 struct cp_control *cpc); 3705 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); 3706 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, 3707 block_t blk_addr); 3708 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3709 enum iostat_type io_type); 3710 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio); 3711 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3712 struct f2fs_io_info *fio); 3713 int f2fs_inplace_write_data(struct f2fs_io_info *fio); 3714 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3715 block_t old_blkaddr, block_t new_blkaddr, 3716 bool recover_curseg, bool recover_newaddr, 3717 bool from_gc); 3718 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3719 block_t old_addr, block_t new_addr, 3720 unsigned char version, bool recover_curseg, 3721 bool recover_newaddr); 3722 int f2fs_get_segment_temp(int seg_type); 3723 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 3724 block_t old_blkaddr, block_t *new_blkaddr, 3725 struct f2fs_summary *sum, int type, 3726 struct f2fs_io_info *fio); 3727 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, 3728 block_t blkaddr, unsigned int blkcnt); 3729 void f2fs_wait_on_page_writeback(struct page *page, 3730 enum page_type type, bool ordered, bool locked); 3731 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); 3732 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3733 block_t len); 3734 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3735 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3736 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 3737 unsigned int val, int alloc); 3738 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3739 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi); 3740 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi); 3741 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi); 3742 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi); 3743 int __init f2fs_create_segment_manager_caches(void); 3744 void f2fs_destroy_segment_manager_caches(void); 3745 int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint); 3746 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, 3747 enum page_type type, enum temp_type temp); 3748 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, 3749 unsigned int segno); 3750 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, 3751 unsigned int segno); 3752 3753 #define DEF_FRAGMENT_SIZE 4 3754 #define MIN_FRAGMENT_SIZE 1 3755 #define MAX_FRAGMENT_SIZE 512 3756 3757 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi) 3758 { 3759 return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG || 3760 F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK; 3761 } 3762 3763 /* 3764 * checkpoint.c 3765 */ 3766 void 
f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, 3767 unsigned char reason); 3768 void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi); 3769 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3770 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3771 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); 3772 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); 3773 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3774 block_t blkaddr, int type); 3775 bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi, 3776 block_t blkaddr, int type); 3777 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 3778 int type, bool sync); 3779 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, 3780 unsigned int ra_blocks); 3781 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, 3782 long nr_to_write, enum iostat_type io_type); 3783 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3784 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3785 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all); 3786 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); 3787 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3788 unsigned int devidx, int type); 3789 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3790 unsigned int devidx, int type); 3791 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi); 3792 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi); 3793 void f2fs_add_orphan_inode(struct inode *inode); 3794 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); 3795 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi); 3796 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); 3797 void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio); 3798 void f2fs_remove_dirty_inode(struct inode *inode); 3799 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, 3800 bool from_cp); 3801 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); 3802 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi); 3803 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3804 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); 3805 int __init f2fs_create_checkpoint_caches(void); 3806 void f2fs_destroy_checkpoint_caches(void); 3807 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi); 3808 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi); 3809 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi); 3810 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); 3811 3812 /* 3813 * data.c 3814 */ 3815 int __init f2fs_init_bioset(void); 3816 void f2fs_destroy_bioset(void); 3817 bool f2fs_is_cp_guaranteed(struct page *page); 3818 int f2fs_init_bio_entry_cache(void); 3819 void f2fs_destroy_bio_entry_cache(void); 3820 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio, 3821 enum page_type type); 3822 int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi); 3823 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type); 3824 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, 3825 struct inode *inode, struct page *page, 3826 nid_t ino, enum page_type type); 3827 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, 3828 struct bio **bio, struct page *page); 3829 void f2fs_flush_merged_writes(struct f2fs_sb_info 
*sbi); 3830 int f2fs_submit_page_bio(struct f2fs_io_info *fio); 3831 int f2fs_merge_page_bio(struct f2fs_io_info *fio); 3832 void f2fs_submit_page_write(struct f2fs_io_info *fio); 3833 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, 3834 block_t blk_addr, sector_t *sector); 3835 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr); 3836 void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); 3837 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); 3838 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count); 3839 int f2fs_reserve_new_block(struct dnode_of_data *dn); 3840 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index); 3841 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); 3842 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, 3843 blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs); 3844 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index, 3845 pgoff_t *next_pgofs); 3846 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index, 3847 bool for_write); 3848 struct page *f2fs_get_new_data_page(struct inode *inode, 3849 struct page *ipage, pgoff_t index, bool new_i_size); 3850 int f2fs_do_write_data_page(struct f2fs_io_info *fio); 3851 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag); 3852 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 3853 u64 start, u64 len); 3854 int f2fs_encrypt_one_page(struct f2fs_io_info *fio); 3855 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio); 3856 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio); 3857 int f2fs_write_single_data_page(struct page *page, int *submitted, 3858 struct bio **bio, sector_t *last_block, 3859 struct writeback_control *wbc, 3860 enum iostat_type io_type, 3861 int compr_blocks, bool allow_balance); 3862 void f2fs_write_failed(struct inode *inode, loff_t to); 3863 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length); 3864 bool f2fs_release_folio(struct folio *folio, gfp_t wait); 3865 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len); 3866 void f2fs_clear_page_cache_dirty_tag(struct page *page); 3867 int f2fs_init_post_read_processing(void); 3868 void f2fs_destroy_post_read_processing(void); 3869 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi); 3870 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi); 3871 extern const struct iomap_ops f2fs_iomap_ops; 3872 3873 /* 3874 * gc.c 3875 */ 3876 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi); 3877 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi); 3878 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); 3879 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control); 3880 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); 3881 int f2fs_gc_range(struct f2fs_sb_info *sbi, 3882 unsigned int start_seg, unsigned int end_seg, 3883 bool dry_run, unsigned int dry_run_sections); 3884 int f2fs_resize_fs(struct file *filp, __u64 block_count); 3885 int __init f2fs_create_garbage_collection_cache(void); 3886 void f2fs_destroy_garbage_collection_cache(void); 3887 /* victim selection function for cleaning and SSR */ 3888 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, 3889 int gc_type, int type, char alloc_mode, 3890 unsigned long long age); 3891 3892 /* 3893 * recovery.c 3894 */ 3895 int 
f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); 3896 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi); 3897 int __init f2fs_create_recovery_cache(void); 3898 void f2fs_destroy_recovery_cache(void); 3899 3900 /* 3901 * debug.c 3902 */ 3903 #ifdef CONFIG_F2FS_STAT_FS 3904 struct f2fs_stat_info { 3905 struct list_head stat_list; 3906 struct f2fs_sb_info *sbi; 3907 int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; 3908 int main_area_segs, main_area_sections, main_area_zones; 3909 unsigned long long hit_cached[NR_EXTENT_CACHES]; 3910 unsigned long long hit_rbtree[NR_EXTENT_CACHES]; 3911 unsigned long long total_ext[NR_EXTENT_CACHES]; 3912 unsigned long long hit_total[NR_EXTENT_CACHES]; 3913 int ext_tree[NR_EXTENT_CACHES]; 3914 int zombie_tree[NR_EXTENT_CACHES]; 3915 int ext_node[NR_EXTENT_CACHES]; 3916 /* to count memory footprint */ 3917 unsigned long long ext_mem[NR_EXTENT_CACHES]; 3918 /* for read extent cache */ 3919 unsigned long long hit_largest; 3920 /* for block age extent cache */ 3921 unsigned long long allocated_data_blocks; 3922 int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; 3923 int ndirty_data, ndirty_qdata; 3924 unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all; 3925 int nats, dirty_nats, sits, dirty_sits; 3926 int free_nids, avail_nids, alloc_nids; 3927 int total_count, utilization; 3928 int nr_wb_cp_data, nr_wb_data; 3929 int nr_rd_data, nr_rd_node, nr_rd_meta; 3930 int nr_dio_read, nr_dio_write; 3931 unsigned int io_skip_bggc, other_skip_bggc; 3932 int nr_flushing, nr_flushed, flush_list_empty; 3933 int nr_discarding, nr_discarded; 3934 int nr_discard_cmd; 3935 unsigned int undiscard_blks; 3936 int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt; 3937 unsigned int cur_ckpt_time, peak_ckpt_time; 3938 int inline_xattr, inline_inode, inline_dir, append, update, orphans; 3939 int compr_inode, swapfile_inode; 3940 unsigned long long compr_blocks; 3941 int aw_cnt, max_aw_cnt; 3942 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; 3943 unsigned int bimodal, avg_vblocks; 3944 int util_free, util_valid, util_invalid; 3945 int rsvd_segs, overp_segs; 3946 int dirty_count, node_pages, meta_pages, compress_pages; 3947 int compress_page_hit; 3948 int prefree_count, free_segs, free_secs; 3949 int cp_call_count[MAX_CALL_TYPE], cp_count; 3950 int gc_call_count[MAX_CALL_TYPE]; 3951 int gc_segs[2][2]; 3952 int gc_secs[2][2]; 3953 int tot_blks, data_blks, node_blks; 3954 int bg_data_blks, bg_node_blks; 3955 int curseg[NR_CURSEG_TYPE]; 3956 int cursec[NR_CURSEG_TYPE]; 3957 int curzone[NR_CURSEG_TYPE]; 3958 unsigned int dirty_seg[NR_CURSEG_TYPE]; 3959 unsigned int full_seg[NR_CURSEG_TYPE]; 3960 unsigned int valid_blks[NR_CURSEG_TYPE]; 3961 3962 unsigned int meta_count[META_MAX]; 3963 unsigned int segment_count[2]; 3964 unsigned int block_count[2]; 3965 unsigned int inplace_count; 3966 unsigned long long base_mem, cache_mem, page_mem; 3967 }; 3968 3969 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) 3970 { 3971 return (struct f2fs_stat_info *)sbi->stat_info; 3972 } 3973 3974 #define stat_inc_cp_call_count(sbi, foreground) \ 3975 atomic_inc(&sbi->cp_call_count[(foreground)]) 3976 #define stat_inc_cp_count(si) (F2FS_STAT(sbi)->cp_count++) 3977 #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++) 3978 #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++) 3979 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++) 3980 #define 
stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--) 3981 #define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type])) 3982 #define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type])) 3983 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest)) 3984 #define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type])) 3985 #define stat_inc_inline_xattr(inode) \ 3986 do { \ 3987 if (f2fs_has_inline_xattr(inode)) \ 3988 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \ 3989 } while (0) 3990 #define stat_dec_inline_xattr(inode) \ 3991 do { \ 3992 if (f2fs_has_inline_xattr(inode)) \ 3993 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \ 3994 } while (0) 3995 #define stat_inc_inline_inode(inode) \ 3996 do { \ 3997 if (f2fs_has_inline_data(inode)) \ 3998 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \ 3999 } while (0) 4000 #define stat_dec_inline_inode(inode) \ 4001 do { \ 4002 if (f2fs_has_inline_data(inode)) \ 4003 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \ 4004 } while (0) 4005 #define stat_inc_inline_dir(inode) \ 4006 do { \ 4007 if (f2fs_has_inline_dentry(inode)) \ 4008 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \ 4009 } while (0) 4010 #define stat_dec_inline_dir(inode) \ 4011 do { \ 4012 if (f2fs_has_inline_dentry(inode)) \ 4013 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \ 4014 } while (0) 4015 #define stat_inc_compr_inode(inode) \ 4016 do { \ 4017 if (f2fs_compressed_file(inode)) \ 4018 (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \ 4019 } while (0) 4020 #define stat_dec_compr_inode(inode) \ 4021 do { \ 4022 if (f2fs_compressed_file(inode)) \ 4023 (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \ 4024 } while (0) 4025 #define stat_add_compr_blocks(inode, blocks) \ 4026 (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks)) 4027 #define stat_sub_compr_blocks(inode, blocks) \ 4028 (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks)) 4029 #define stat_inc_swapfile_inode(inode) \ 4030 (atomic_inc(&F2FS_I_SB(inode)->swapfile_inode)) 4031 #define stat_dec_swapfile_inode(inode) \ 4032 (atomic_dec(&F2FS_I_SB(inode)->swapfile_inode)) 4033 #define stat_inc_atomic_inode(inode) \ 4034 (atomic_inc(&F2FS_I_SB(inode)->atomic_files)) 4035 #define stat_dec_atomic_inode(inode) \ 4036 (atomic_dec(&F2FS_I_SB(inode)->atomic_files)) 4037 #define stat_inc_meta_count(sbi, blkaddr) \ 4038 do { \ 4039 if (blkaddr < SIT_I(sbi)->sit_base_addr) \ 4040 atomic_inc(&(sbi)->meta_count[META_CP]); \ 4041 else if (blkaddr < NM_I(sbi)->nat_blkaddr) \ 4042 atomic_inc(&(sbi)->meta_count[META_SIT]); \ 4043 else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \ 4044 atomic_inc(&(sbi)->meta_count[META_NAT]); \ 4045 else if (blkaddr < SM_I(sbi)->main_blkaddr) \ 4046 atomic_inc(&(sbi)->meta_count[META_SSA]); \ 4047 } while (0) 4048 #define stat_inc_seg_type(sbi, curseg) \ 4049 ((sbi)->segment_count[(curseg)->alloc_type]++) 4050 #define stat_inc_block_count(sbi, curseg) \ 4051 ((sbi)->block_count[(curseg)->alloc_type]++) 4052 #define stat_inc_inplace_blocks(sbi) \ 4053 (atomic_inc(&(sbi)->inplace_count)) 4054 #define stat_update_max_atomic_write(inode) \ 4055 do { \ 4056 int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \ 4057 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \ 4058 if (cur > max) \ 4059 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \ 4060 } while (0) 4061 #define stat_inc_gc_call_count(sbi, foreground) \ 4062 (F2FS_STAT(sbi)->gc_call_count[(foreground)]++) 4063 #define stat_inc_gc_sec_count(sbi, 
type, gc_type) \ 4064 (F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++) 4065 #define stat_inc_gc_seg_count(sbi, type, gc_type) \ 4066 (F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++) 4067 4068 #define stat_inc_tot_blk_count(si, blks) \ 4069 ((si)->tot_blks += (blks)) 4070 4071 #define stat_inc_data_blk_count(sbi, blks, gc_type) \ 4072 do { \ 4073 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 4074 stat_inc_tot_blk_count(si, blks); \ 4075 si->data_blks += (blks); \ 4076 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 4077 } while (0) 4078 4079 #define stat_inc_node_blk_count(sbi, blks, gc_type) \ 4080 do { \ 4081 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 4082 stat_inc_tot_blk_count(si, blks); \ 4083 si->node_blks += (blks); \ 4084 si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 4085 } while (0) 4086 4087 int f2fs_build_stats(struct f2fs_sb_info *sbi); 4088 void f2fs_destroy_stats(struct f2fs_sb_info *sbi); 4089 void __init f2fs_create_root_stats(void); 4090 void f2fs_destroy_root_stats(void); 4091 void f2fs_update_sit_info(struct f2fs_sb_info *sbi); 4092 #else 4093 #define stat_inc_cp_call_count(sbi, foreground) do { } while (0) 4094 #define stat_inc_cp_count(sbi) do { } while (0) 4095 #define stat_io_skip_bggc_count(sbi) do { } while (0) 4096 #define stat_other_skip_bggc_count(sbi) do { } while (0) 4097 #define stat_inc_dirty_inode(sbi, type) do { } while (0) 4098 #define stat_dec_dirty_inode(sbi, type) do { } while (0) 4099 #define stat_inc_total_hit(sbi, type) do { } while (0) 4100 #define stat_inc_rbtree_node_hit(sbi, type) do { } while (0) 4101 #define stat_inc_largest_node_hit(sbi) do { } while (0) 4102 #define stat_inc_cached_node_hit(sbi, type) do { } while (0) 4103 #define stat_inc_inline_xattr(inode) do { } while (0) 4104 #define stat_dec_inline_xattr(inode) do { } while (0) 4105 #define stat_inc_inline_inode(inode) do { } while (0) 4106 #define stat_dec_inline_inode(inode) do { } while (0) 4107 #define stat_inc_inline_dir(inode) do { } while (0) 4108 #define stat_dec_inline_dir(inode) do { } while (0) 4109 #define stat_inc_compr_inode(inode) do { } while (0) 4110 #define stat_dec_compr_inode(inode) do { } while (0) 4111 #define stat_add_compr_blocks(inode, blocks) do { } while (0) 4112 #define stat_sub_compr_blocks(inode, blocks) do { } while (0) 4113 #define stat_inc_swapfile_inode(inode) do { } while (0) 4114 #define stat_dec_swapfile_inode(inode) do { } while (0) 4115 #define stat_inc_atomic_inode(inode) do { } while (0) 4116 #define stat_dec_atomic_inode(inode) do { } while (0) 4117 #define stat_update_max_atomic_write(inode) do { } while (0) 4118 #define stat_inc_meta_count(sbi, blkaddr) do { } while (0) 4119 #define stat_inc_seg_type(sbi, curseg) do { } while (0) 4120 #define stat_inc_block_count(sbi, curseg) do { } while (0) 4121 #define stat_inc_inplace_blocks(sbi) do { } while (0) 4122 #define stat_inc_gc_call_count(sbi, foreground) do { } while (0) 4123 #define stat_inc_gc_sec_count(sbi, type, gc_type) do { } while (0) 4124 #define stat_inc_gc_seg_count(sbi, type, gc_type) do { } while (0) 4125 #define stat_inc_tot_blk_count(si, blks) do { } while (0) 4126 #define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0) 4127 #define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0) 4128 4129 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } 4130 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } 4131 static inline void __init f2fs_create_root_stats(void) { } 4132 static inline void 
f2fs_destroy_root_stats(void) { } 4133 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {} 4134 #endif 4135 4136 extern const struct file_operations f2fs_dir_operations; 4137 extern const struct file_operations f2fs_file_operations; 4138 extern const struct inode_operations f2fs_file_inode_operations; 4139 extern const struct address_space_operations f2fs_dblock_aops; 4140 extern const struct address_space_operations f2fs_node_aops; 4141 extern const struct address_space_operations f2fs_meta_aops; 4142 extern const struct inode_operations f2fs_dir_inode_operations; 4143 extern const struct inode_operations f2fs_symlink_inode_operations; 4144 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations; 4145 extern const struct inode_operations f2fs_special_inode_operations; 4146 extern struct kmem_cache *f2fs_inode_entry_slab; 4147 4148 /* 4149 * inline.c 4150 */ 4151 bool f2fs_may_inline_data(struct inode *inode); 4152 bool f2fs_sanity_check_inline_data(struct inode *inode); 4153 bool f2fs_may_inline_dentry(struct inode *inode); 4154 void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage); 4155 void f2fs_truncate_inline_inode(struct inode *inode, 4156 struct page *ipage, u64 from); 4157 int f2fs_read_inline_data(struct inode *inode, struct folio *folio); 4158 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); 4159 int f2fs_convert_inline_inode(struct inode *inode); 4160 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); 4161 int f2fs_write_inline_data(struct inode *inode, struct page *page); 4162 int f2fs_recover_inline_data(struct inode *inode, struct page *npage); 4163 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, 4164 const struct f2fs_filename *fname, 4165 struct page **res_page); 4166 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, 4167 struct page *ipage); 4168 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, 4169 struct inode *inode, nid_t ino, umode_t mode); 4170 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, 4171 struct page *page, struct inode *dir, 4172 struct inode *inode); 4173 bool f2fs_empty_inline_dir(struct inode *dir); 4174 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, 4175 struct fscrypt_str *fstr); 4176 int f2fs_inline_data_fiemap(struct inode *inode, 4177 struct fiemap_extent_info *fieinfo, 4178 __u64 start, __u64 len); 4179 4180 /* 4181 * shrinker.c 4182 */ 4183 unsigned long f2fs_shrink_count(struct shrinker *shrink, 4184 struct shrink_control *sc); 4185 unsigned long f2fs_shrink_scan(struct shrinker *shrink, 4186 struct shrink_control *sc); 4187 void f2fs_join_shrinker(struct f2fs_sb_info *sbi); 4188 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); 4189 4190 /* 4191 * extent_cache.c 4192 */ 4193 bool sanity_check_extent_cache(struct inode *inode); 4194 void f2fs_init_extent_tree(struct inode *inode); 4195 void f2fs_drop_extent_tree(struct inode *inode); 4196 void f2fs_destroy_extent_node(struct inode *inode); 4197 void f2fs_destroy_extent_tree(struct inode *inode); 4198 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi); 4199 int __init f2fs_create_extent_cache(void); 4200 void f2fs_destroy_extent_cache(void); 4201 4202 /* read extent cache ops */ 4203 void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage); 4204 bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs, 4205 struct extent_info *ei); 4206 bool 
f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index, 4207 block_t *blkaddr); 4208 void f2fs_update_read_extent_cache(struct dnode_of_data *dn); 4209 void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn, 4210 pgoff_t fofs, block_t blkaddr, unsigned int len); 4211 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, 4212 int nr_shrink); 4213 4214 /* block age extent cache ops */ 4215 void f2fs_init_age_extent_tree(struct inode *inode); 4216 bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs, 4217 struct extent_info *ei); 4218 void f2fs_update_age_extent_cache(struct dnode_of_data *dn); 4219 void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn, 4220 pgoff_t fofs, unsigned int len); 4221 unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, 4222 int nr_shrink); 4223 4224 /* 4225 * sysfs.c 4226 */ 4227 #define MIN_RA_MUL 2 4228 #define MAX_RA_MUL 256 4229 4230 int __init f2fs_init_sysfs(void); 4231 void f2fs_exit_sysfs(void); 4232 int f2fs_register_sysfs(struct f2fs_sb_info *sbi); 4233 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi); 4234 4235 /* verity.c */ 4236 extern const struct fsverity_operations f2fs_verityops; 4237 4238 /* 4239 * crypto support 4240 */ 4241 static inline bool f2fs_encrypted_file(struct inode *inode) 4242 { 4243 return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); 4244 } 4245 4246 static inline void f2fs_set_encrypted_inode(struct inode *inode) 4247 { 4248 #ifdef CONFIG_FS_ENCRYPTION 4249 file_set_encrypt(inode); 4250 f2fs_set_inode_flags(inode); 4251 #endif 4252 } 4253 4254 /* 4255 * Returns true if the reads of the inode's data need to undergo some 4256 * postprocessing step, like decryption or authenticity verification. 4257 */ 4258 static inline bool f2fs_post_read_required(struct inode *inode) 4259 { 4260 return f2fs_encrypted_file(inode) || fsverity_active(inode) || 4261 f2fs_compressed_file(inode); 4262 } 4263 4264 /* 4265 * compress.c 4266 */ 4267 #ifdef CONFIG_F2FS_FS_COMPRESSION 4268 bool f2fs_is_compressed_page(struct page *page); 4269 struct page *f2fs_compress_control_page(struct page *page); 4270 int f2fs_prepare_compress_overwrite(struct inode *inode, 4271 struct page **pagep, pgoff_t index, void **fsdata); 4272 bool f2fs_compress_write_end(struct inode *inode, void *fsdata, 4273 pgoff_t index, unsigned copied); 4274 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); 4275 void f2fs_compress_write_end_io(struct bio *bio, struct page *page); 4276 bool f2fs_is_compress_backend_ready(struct inode *inode); 4277 bool f2fs_is_compress_level_valid(int alg, int lvl); 4278 int __init f2fs_init_compress_mempool(void); 4279 void f2fs_destroy_compress_mempool(void); 4280 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task); 4281 void f2fs_end_read_compressed_page(struct page *page, bool failed, 4282 block_t blkaddr, bool in_task); 4283 bool f2fs_cluster_is_empty(struct compress_ctx *cc); 4284 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); 4285 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages, 4286 int index, int nr_pages, bool uptodate); 4287 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn); 4288 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page); 4289 int f2fs_write_multi_pages(struct compress_ctx *cc, 4290 int *submitted, 4291 struct writeback_control *wbc, 4292 enum iostat_type io_type); 4293 int f2fs_is_compressed_cluster(struct inode 
*inode, pgoff_t index); 4294 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode, 4295 pgoff_t fofs, block_t blkaddr, 4296 unsigned int llen, unsigned int c_len); 4297 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, 4298 unsigned nr_pages, sector_t *last_block_in_bio, 4299 bool is_readahead, bool for_write); 4300 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); 4301 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, 4302 bool in_task); 4303 void f2fs_put_page_dic(struct page *page, bool in_task); 4304 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn, 4305 unsigned int ofs_in_node); 4306 int f2fs_init_compress_ctx(struct compress_ctx *cc); 4307 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse); 4308 void f2fs_init_compress_info(struct f2fs_sb_info *sbi); 4309 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi); 4310 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi); 4311 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi); 4312 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); 4313 int __init f2fs_init_compress_cache(void); 4314 void f2fs_destroy_compress_cache(void); 4315 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi); 4316 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr); 4317 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4318 nid_t ino, block_t blkaddr); 4319 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4320 block_t blkaddr); 4321 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino); 4322 #define inc_compr_inode_stat(inode) \ 4323 do { \ 4324 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ 4325 sbi->compr_new_inode++; \ 4326 } while (0) 4327 #define add_compr_block_stat(inode, blocks) \ 4328 do { \ 4329 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ 4330 int diff = F2FS_I(inode)->i_cluster_size - blocks; \ 4331 sbi->compr_written_block += blocks; \ 4332 sbi->compr_saved_block += diff; \ 4333 } while (0) 4334 #else 4335 static inline bool f2fs_is_compressed_page(struct page *page) { return false; } 4336 static inline bool f2fs_is_compress_backend_ready(struct inode *inode) 4337 { 4338 if (!f2fs_compressed_file(inode)) 4339 return true; 4340 /* not support compression */ 4341 return false; 4342 } 4343 static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; } 4344 static inline struct page *f2fs_compress_control_page(struct page *page) 4345 { 4346 WARN_ON_ONCE(1); 4347 return ERR_PTR(-EINVAL); 4348 } 4349 static inline int __init f2fs_init_compress_mempool(void) { return 0; } 4350 static inline void f2fs_destroy_compress_mempool(void) { } 4351 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic, 4352 bool in_task) { } 4353 static inline void f2fs_end_read_compressed_page(struct page *page, 4354 bool failed, block_t blkaddr, bool in_task) 4355 { 4356 WARN_ON_ONCE(1); 4357 } 4358 static inline void f2fs_put_page_dic(struct page *page, bool in_task) 4359 { 4360 WARN_ON_ONCE(1); 4361 } 4362 static inline unsigned int f2fs_cluster_blocks_are_contiguous( 4363 struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; } 4364 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; } 4365 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; } 4366 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info 
*sbi) { } 4367 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; } 4368 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } 4369 static inline int __init f2fs_init_compress_cache(void) { return 0; } 4370 static inline void f2fs_destroy_compress_cache(void) { } 4371 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, 4372 block_t blkaddr) { } 4373 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, 4374 struct page *page, nid_t ino, block_t blkaddr) { } 4375 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, 4376 struct page *page, block_t blkaddr) { return false; } 4377 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, 4378 nid_t ino) { } 4379 #define inc_compr_inode_stat(inode) do { } while (0) 4380 static inline void f2fs_update_read_extent_tree_range_compressed( 4381 struct inode *inode, 4382 pgoff_t fofs, block_t blkaddr, 4383 unsigned int llen, unsigned int c_len) { } 4384 #endif 4385 4386 static inline int set_compress_context(struct inode *inode) 4387 { 4388 #ifdef CONFIG_F2FS_FS_COMPRESSION 4389 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4390 4391 F2FS_I(inode)->i_compress_algorithm = 4392 F2FS_OPTION(sbi).compress_algorithm; 4393 F2FS_I(inode)->i_log_cluster_size = 4394 F2FS_OPTION(sbi).compress_log_size; 4395 F2FS_I(inode)->i_compress_flag = 4396 F2FS_OPTION(sbi).compress_chksum ? 4397 BIT(COMPRESS_CHKSUM) : 0; 4398 F2FS_I(inode)->i_cluster_size = 4399 BIT(F2FS_I(inode)->i_log_cluster_size); 4400 if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 || 4401 F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) && 4402 F2FS_OPTION(sbi).compress_level) 4403 F2FS_I(inode)->i_compress_level = 4404 F2FS_OPTION(sbi).compress_level; 4405 F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; 4406 set_inode_flag(inode, FI_COMPRESSED_FILE); 4407 stat_inc_compr_inode(inode); 4408 inc_compr_inode_stat(inode); 4409 f2fs_mark_inode_dirty_sync(inode, true); 4410 return 0; 4411 #else 4412 return -EOPNOTSUPP; 4413 #endif 4414 } 4415 4416 static inline bool f2fs_disable_compressed_file(struct inode *inode) 4417 { 4418 struct f2fs_inode_info *fi = F2FS_I(inode); 4419 4420 f2fs_down_write(&F2FS_I(inode)->i_sem); 4421 4422 if (!f2fs_compressed_file(inode)) { 4423 f2fs_up_write(&F2FS_I(inode)->i_sem); 4424 return true; 4425 } 4426 if (f2fs_is_mmap_file(inode) || 4427 (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) { 4428 f2fs_up_write(&F2FS_I(inode)->i_sem); 4429 return false; 4430 } 4431 4432 fi->i_flags &= ~F2FS_COMPR_FL; 4433 stat_dec_compr_inode(inode); 4434 clear_inode_flag(inode, FI_COMPRESSED_FILE); 4435 f2fs_mark_inode_dirty_sync(inode, true); 4436 4437 f2fs_up_write(&F2FS_I(inode)->i_sem); 4438 return true; 4439 } 4440 4441 #define F2FS_FEATURE_FUNCS(name, flagname) \ 4442 static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \ 4443 { \ 4444 return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \ 4445 } 4446 4447 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT); 4448 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED); 4449 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR); 4450 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA); 4451 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM); 4452 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR); 4453 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO); 4454 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME); 4455 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND); 4456 F2FS_FEATURE_FUNCS(verity, VERITY); 4457 F2FS_FEATURE_FUNCS(sb_chksum, 
SB_CHKSUM); 4458 F2FS_FEATURE_FUNCS(casefold, CASEFOLD); 4459 F2FS_FEATURE_FUNCS(compression, COMPRESSION); 4460 F2FS_FEATURE_FUNCS(readonly, RO); 4461 4462 #ifdef CONFIG_BLK_DEV_ZONED 4463 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, 4464 block_t blkaddr) 4465 { 4466 unsigned int zno = blkaddr / sbi->blocks_per_blkz; 4467 4468 return test_bit(zno, FDEV(devi).blkz_seq); 4469 } 4470 #endif 4471 4472 static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi, 4473 struct block_device *bdev) 4474 { 4475 int i; 4476 4477 if (!f2fs_is_multi_device(sbi)) 4478 return 0; 4479 4480 for (i = 0; i < sbi->s_ndevs; i++) 4481 if (FDEV(i).bdev == bdev) 4482 return i; 4483 4484 WARN_ON(1); 4485 return -1; 4486 } 4487 4488 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi) 4489 { 4490 return f2fs_sb_has_blkzoned(sbi); 4491 } 4492 4493 static inline bool f2fs_bdev_support_discard(struct block_device *bdev) 4494 { 4495 return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev); 4496 } 4497 4498 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi) 4499 { 4500 int i; 4501 4502 if (!f2fs_is_multi_device(sbi)) 4503 return f2fs_bdev_support_discard(sbi->sb->s_bdev); 4504 4505 for (i = 0; i < sbi->s_ndevs; i++) 4506 if (f2fs_bdev_support_discard(FDEV(i).bdev)) 4507 return true; 4508 return false; 4509 } 4510 4511 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi) 4512 { 4513 return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) || 4514 f2fs_hw_should_discard(sbi); 4515 } 4516 4517 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi) 4518 { 4519 int i; 4520 4521 if (!f2fs_is_multi_device(sbi)) 4522 return bdev_read_only(sbi->sb->s_bdev); 4523 4524 for (i = 0; i < sbi->s_ndevs; i++) 4525 if (bdev_read_only(FDEV(i).bdev)) 4526 return true; 4527 return false; 4528 } 4529 4530 static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi) 4531 { 4532 return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi); 4533 } 4534 4535 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi) 4536 { 4537 return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; 4538 } 4539 4540 static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi, 4541 block_t blkaddr) 4542 { 4543 if (f2fs_sb_has_blkzoned(sbi)) { 4544 int devi = f2fs_target_device_index(sbi, blkaddr); 4545 4546 return !bdev_is_zoned(FDEV(devi).bdev); 4547 } 4548 return true; 4549 } 4550 4551 static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi) 4552 { 4553 return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW; 4554 } 4555 4556 static inline bool f2fs_may_compress(struct inode *inode) 4557 { 4558 if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) || 4559 f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) || 4560 f2fs_is_mmap_file(inode)) 4561 return false; 4562 return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode); 4563 } 4564 4565 static inline void f2fs_i_compr_blocks_update(struct inode *inode, 4566 u64 blocks, bool add) 4567 { 4568 struct f2fs_inode_info *fi = F2FS_I(inode); 4569 int diff = fi->i_cluster_size - blocks; 4570 4571 /* don't update i_compr_blocks if saved blocks were released */ 4572 if (!add && !atomic_read(&fi->i_compr_blocks)) 4573 return; 4574 4575 if (add) { 4576 atomic_add(diff, &fi->i_compr_blocks); 4577 stat_add_compr_blocks(inode, diff); 4578 } else { 4579 atomic_sub(diff, &fi->i_compr_blocks); 4580 stat_sub_compr_blocks(inode, diff); 4581 } 4582 f2fs_mark_inode_dirty_sync(inode, true); 4583 } 4584 4585 static inline 
bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi, 4586 int flag) 4587 { 4588 if (!f2fs_is_multi_device(sbi)) 4589 return false; 4590 if (flag != F2FS_GET_BLOCK_DIO) 4591 return false; 4592 return sbi->aligned_blksize; 4593 } 4594 4595 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) 4596 { 4597 return fsverity_active(inode) && 4598 idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); 4599 } 4600 4601 #ifdef CONFIG_F2FS_FAULT_INJECTION 4602 extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate, 4603 unsigned long type); 4604 #else 4605 static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, 4606 unsigned long rate, unsigned long type) 4607 { 4608 return 0; 4609 } 4610 #endif 4611 4612 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi) 4613 { 4614 #ifdef CONFIG_QUOTA 4615 if (f2fs_sb_has_quota_ino(sbi)) 4616 return true; 4617 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || 4618 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || 4619 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) 4620 return true; 4621 #endif 4622 return false; 4623 } 4624 4625 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi) 4626 { 4627 return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK; 4628 } 4629 4630 static inline void f2fs_io_schedule_timeout(long timeout) 4631 { 4632 set_current_state(TASK_UNINTERRUPTIBLE); 4633 io_schedule_timeout(timeout); 4634 } 4635 4636 static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs, 4637 enum page_type type) 4638 { 4639 if (unlikely(f2fs_cp_error(sbi))) 4640 return; 4641 4642 if (ofs == sbi->page_eio_ofs[type]) { 4643 if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO) 4644 set_ckpt_flags(sbi, CP_ERROR_FLAG); 4645 } else { 4646 sbi->page_eio_ofs[type] = ofs; 4647 sbi->page_eio_cnt[type] = 0; 4648 } 4649 } 4650 4651 static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi) 4652 { 4653 return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb); 4654 } 4655 4656 static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi, 4657 block_t blkaddr, unsigned int cnt) 4658 { 4659 bool need_submit = false; 4660 int i = 0; 4661 4662 do { 4663 struct page *page; 4664 4665 page = find_get_page(META_MAPPING(sbi), blkaddr + i); 4666 if (page) { 4667 if (folio_test_writeback(page_folio(page))) 4668 need_submit = true; 4669 f2fs_put_page(page, 0); 4670 } 4671 } while (++i < cnt && !need_submit); 4672 4673 if (need_submit) 4674 f2fs_submit_merged_write_cond(sbi, sbi->meta_inode, 4675 NULL, 0, DATA); 4676 4677 truncate_inode_pages_range(META_MAPPING(sbi), 4678 F2FS_BLK_TO_BYTES((loff_t)blkaddr), 4679 F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1))); 4680 } 4681 4682 static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi, 4683 block_t blkaddr) 4684 { 4685 f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1); 4686 f2fs_invalidate_compress_page(sbi, blkaddr); 4687 } 4688 4689 #define EFSBADCRC EBADMSG /* Bad CRC detected */ 4690 #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ 4691 4692 #endif /* _LINUX_F2FS_H */ 4693
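/*
 * Illustrative use of F2FS_FITS_IN_INODE() defined earlier in this header
 * (a sketch, not an additional declaration): given the raw on-disk inode
 * "ri" and its recorded extra_isize, a caller can check whether an optional
 * extra-attribute field (i_projid of struct f2fs_inode is used here purely
 * as an example) lies inside the area covered by extra_isize before reading
 * it:
 *
 *	if (f2fs_has_extra_attr(inode) &&
 *	    F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize), i_projid))
 *		projid = le32_to_cpu(ri->i_projid);
 *
 * i.e. the macro compares offsetof() + sizeof() of the field against
 * F2FS_OLD_ATTRIBUTE_SIZE plus the inode's extra_isize.
 */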
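/*
 * Worked expansion of the F2FS_FEATURE_FUNCS() generator above: the
 * invocation F2FS_FEATURE_FUNCS(casefold, CASEFOLD) expands to
 *
 *	static inline bool f2fs_sb_has_casefold(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_CASEFOLD);
 *	}
 *
 * so callers test on-disk features as f2fs_sb_has_casefold(sbi),
 * f2fs_sb_has_compression(sbi), etc., one predicate per F2FS_FEATURE_* bit.
 */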