Lines Matching +full:multi +full:- +full:cluster
1 /* SPDX-License-Identifier: GPL-2.0 */
13 #include <linux/page-flags.h>
88 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
136 #define F2FS_OPTION(sbi) ((sbi)->mount_opt)
143 ((long long)((a) - (b)) > 0))
146 * should not change u32, since it is the on-disk block address format, __le32.
173 * prevents priority inversion when a low-priority reader acquires the read lock
174 * while sleeping on the write lock, but the write lock is needed by
175 * higher-priority clients.
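
The unfairness described above is implemented by the accessors excerpted further down in this listing (lines 2276 and 2319-2321): readers wait on a private queue and retry down_read_trylock() rather than sleeping inside the rwsem itself, and the writer wakes all blocked readers when it releases the lock. A minimal sketch of that pattern, with the struct layout inferred from __init_f2fs_rwsem() at line 2257 (the plain down_read() at line 2278 is the fair fallback when this behavior is compiled out):

        #include <linux/rwsem.h>
        #include <linux/wait.h>

        /* Layout inferred from the initializer at line 2257 above. */
        struct f2fs_rwsem {
                struct rw_semaphore internal_rwsem;
                wait_queue_head_t read_waiters;
        };

        static inline void f2fs_down_read(struct f2fs_rwsem *sem)
        {
                /*
                 * Readers retry the trylock from their own waitqueue instead
                 * of queueing inside the rwsem, so a low-priority reader can
                 * never be granted the lock ahead of a waiting writer.
                 */
                wait_event(sem->read_waiters,
                           down_read_trylock(&sem->internal_rwsem));
        }

        static inline void f2fs_up_write(struct f2fs_rwsem *sem)
        {
                up_write(&sem->internal_rwsem);
                wake_up_all(&sem->read_waiters);  /* let blocked readers retry */
        }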
221 unsigned char compress_log_size; /* cluster log size */
250 ((raw_super->feature & cpu_to_le32(mask)) != 0)
251 #define F2FS_HAS_FEATURE(sbi, mask) __F2FS_HAS_FEATURE(sbi->raw_super, mask)
383 wait_queue_head_t ckpt_wait_queue; /* waiting queue for wake-up */
413 (MAX_PLIST_NUM - 1) : ((blk_num) - 1))
429 struct rb_node rb_node; /* rb node located in rb-tree */
474 struct list_head wait_list; /* discard entries being flushed */
475 struct list_head fstrim_list; /* in-flight discard from fstrim */
476 wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
494 struct rb_root_cached root; /* root of discard rb-tree */
507 #define nats_in_cursum(jnl) (le16_to_cpu((jnl)->n_nats))
508 #define sits_in_cursum(jnl) (le16_to_cpu((jnl)->n_sits))
510 #define nat_in_journal(jnl, i) ((jnl)->nat_j.entries[i].ne)
511 #define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid)
512 #define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se)
513 #define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno)
515 #define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
516 #define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
522 journal->n_nats = cpu_to_le16(before + i); in update_nats_in_cursum()
530 journal->n_sits = cpu_to_le16(before + i); in update_sits_in_cursum()
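
The cursum macros above are simple slot accounting for the NAT/SIT journal packed into the checkpoint's current summary: n_nats/n_sits count the used entries, and MAX_*_JENTRIES yields the free slots left. A hedged sketch of how a caller would check for room before appending (reserve_nat_slots() is a hypothetical helper; struct f2fs_journal and NAT_JOURNAL_ENTRIES come from include/linux/f2fs_fs.h):

        #include <linux/errno.h>
        #include <linux/f2fs_fs.h>

        /* Hypothetical helper: room check for i new NAT journal entries. */
        static int reserve_nat_slots(struct f2fs_journal *journal, int i)
        {
                if (i > MAX_NAT_JENTRIES(journal))
                        return -ENOSPC; /* journal full: flush to NAT blocks */

                /* the same update update_nats_in_cursum() performs (line 522) */
                journal->n_nats = cpu_to_le16(nats_in_cursum(journal) + i);
                return 0;
        }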
547 (CUR_ADDRS_PER_INODE(inode) - \
548 get_inline_xattr_addrs(inode) - \
557 #define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \
570 * filesystem-internal operations, e.g. converting an inline directory
571 * to a non-inline one, or roll-forward recovering an encrypted dentry.
576 * The on-disk filename. For encrypted directories, this is encrypted.
586 * For lookups in encrypted directories: either the buffer backing
587 * disk_name, or a buffer that holds the decoded no-key name.
616 d->inode = inode; in make_dentry_ptr_block()
617 d->max = NR_DENTRY_IN_BLOCK; in make_dentry_ptr_block()
618 d->nr_bitmap = SIZE_OF_DENTRY_BITMAP; in make_dentry_ptr_block()
619 d->bitmap = t->dentry_bitmap; in make_dentry_ptr_block()
620 d->dentry = t->dentry; in make_dentry_ptr_block()
621 d->filename = t->filename; in make_dentry_ptr_block()
631 d->inode = inode; in make_dentry_ptr_inline()
632 d->max = entry_cnt; in make_dentry_ptr_inline()
633 d->nr_bitmap = bitmap_size; in make_dentry_ptr_inline()
634 d->bitmap = t; in make_dentry_ptr_inline()
635 d->dentry = t + bitmap_size + reserved_size; in make_dentry_ptr_inline()
636 d->filename = t + bitmap_size + reserved_size + in make_dentry_ptr_inline()
641 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
642 * as its node offset to distinguish it from index node blocks.
645 #define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
646 >> OFFSET_BIT_SHIFT)
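
The double shift builds an all-ones value in the low (32 - OFFSET_BIT_SHIFT) bits: the largest node offset representable once the top bits are reserved to mark the node block, i.e. the "-1" the comment mentions. A standalone demonstration (the shift value 11 is an assumption for illustration only):

        #include <stdio.h>

        #define OFFSET_BIT_SHIFT 11     /* assumed value, illustration only */
        #define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
                                        >> OFFSET_BIT_SHIFT)

        int main(void)
        {
                /* All ones in the low 32 - OFFSET_BIT_SHIFT bits. */
                printf("%#x\n", XATTR_NODE_OFFSET);     /* prints 0x1fffff */
                return 0;
        }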
682 /* for in-memory extent cache entry */
734 struct rb_node rb_node; /* rb node located in rb-tree */
743 struct rb_root_cached root; /* root of extent info rb-tree */
745 struct list_head list; /* to be used by sbi->zombie_list */
746 rwlock_t lock; /* protect extent info rb-tree */
747 atomic_t node_cnt; /* # of extent nodes in rb-tree */
773 struct block_device *m_bdev; /* for multi-device dio */
779 pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
783 bool m_multidev_dio; /* indicate it allows multi-device dio */
841 /* used for f2fs_inode_info->flags */
856 FI_UPDATE_WRITE, /* inode has in-place-update data */
868 FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
870 FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */
897 unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */
928 struct inode *cow_inode; /* copy-on-write inode for atomic write */
946 unsigned char i_log_cluster_size; /* log of cluster size */
949 unsigned int i_cluster_size; /* cluster size */
964 ext->fofs = le32_to_cpu(i_ext->fofs); in get_read_extent_info()
965 ext->blk = le32_to_cpu(i_ext->blk); in get_read_extent_info()
966 ext->len = le32_to_cpu(i_ext->len); in get_read_extent_info()
972 i_ext->fofs = cpu_to_le32(ext->fofs); in set_raw_read_extent()
973 i_ext->blk = cpu_to_le32(ext->blk); in set_raw_read_extent()
974 i_ext->len = cpu_to_le32(ext->len); in set_raw_read_extent()
980 return (back->lstart + back->len == front->lstart) && in __is_discard_mergeable()
981 (back->len + front->len <= max_len); in __is_discard_mergeable()
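
__is_discard_mergeable() above allows a merge only when the back extent ends exactly where the front one starts and the combined length stays within max_len. A self-contained userspace rendering (the two-field struct is a simplified stand-in for the kernel's discard_info):

        #include <stdbool.h>
        #include <stdio.h>

        /* Simplified stand-in for the kernel's discard_info. */
        struct discard_info {
                unsigned int lstart;    /* logical start block */
                unsigned int len;       /* number of blocks */
        };

        static bool is_discard_mergeable(const struct discard_info *back,
                                         const struct discard_info *front,
                                         unsigned int max_len)
        {
                return back->lstart + back->len == front->lstart &&
                       back->len + front->len <= max_len;
        }

        int main(void)
        {
                struct discard_info back = { .lstart = 100, .len = 8 };
                struct discard_info front = { .lstart = 108, .len = 4 };

                /* Adjacent and under the cap: mergeable into [100, 112). */
                printf("%d\n", is_discard_mergeable(&back, &front, 512));
                return 0;
        }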
1076 dn->inode = inode; in set_new_dnode()
1077 dn->inode_folio = ifolio; in set_new_dnode()
1078 dn->node_folio = nfolio; in set_new_dnode()
1079 dn->nid = nid; in set_new_dnode()
1092 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
1093 * data and 8 for node logs.
1125 wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
1154 unsigned int ipu_policy; /* in-place-update policy */
1155 unsigned int min_ipu_util; /* in-place-update threshold */
1174 * f2fs monitors the number of several block types such as on-writeback,
1175 * dirty dentry, dirty node, and dirty meta pages.
1338 #define FDEV(i) (sbi->devs[i])
1339 #define RDEV(i) (raw_super->devs[i])
1372 struct rb_root_cached root; /* root of victim rb-tree */
1374 unsigned int victim_count; /* victim count in rb-tree */
1377 unsigned int age_weight; /* age weight, vblock_weight = 100 - age_weight */
1521 * bits 5 and above: f2fs private data
1527 PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */
1528 PAGE_PRIVATE_ONGOING_MIGRATION, /* data page that is being migrated */
1571 pgoff_t cluster_idx; /* cluster index number */
1572 unsigned int cluster_size; /* page count in cluster */
1573 unsigned int log_cluster_size; /* log of cluster size */
1574 struct page **rpages; /* pages store raw data in cluster */
1576 struct page **cpages; /* pages store compressed data in cluster */
1591 struct page **rpages; /* pages store raw data in cluster */
1593 atomic_t pending_pages; /* in-flight compressed page count */
1596 /* Context for decompressing one cluster on the read IO path */
1601 pgoff_t cluster_idx; /* cluster index number */
1602 unsigned int cluster_size; /* page count in cluster */
1603 unsigned int log_cluster_size; /* log of cluster size */
1604 struct page **rpages; /* pages store raw data in cluster */
1606 struct page **cpages; /* pages store compressed data in cluster */
1608 struct page **tpages; /* temp pages to pad holes in cluster */
1615 * The number of compressed pages remaining to be read in this cluster.
1616 * This is initially nr_cpages; it is decremented each time a compressed page
1617 * has been read (or failed to be read). When it reaches 0, the cluster
1618 * is decompressed (or an error is reported).
1630 * after the pagecache pages are updated and unlocked -- either after
1631 * decompression (and verity, if enabled), or after an error.
1640 bool need_verity; /* need fs-verity verification after decompression? */
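
The two comments above describe a standard shared-refcount completion pattern: every in-flight compressed page read holds a count, and whoever drops it to zero kicks off decompression, followed by fs-verity when need_verity is set. A hedged sketch of the read-completion side (the helper and the exact field names are assumptions based on the comments, not the precise f2fs code):

        /* Sketch only: names assumed from the comments above. */
        static void f2fs_finish_cpage_read(struct decompress_io_ctx *dic,
                                           bool failed)
        {
                if (failed)
                        dic->failed = true;     /* remember any cpage error */

                /*
                 * Last outstanding compressed page: decompress the cluster
                 * (or report the recorded error); verity verification, if
                 * needed, runs after that, before rpages are unlocked.
                 */
                if (atomic_dec_and_test(&dic->remaining_pages))
                        f2fs_decompress_cluster(dic);
        }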
1669 /* for node-related operations */
1673 /* for segment-related operations */
1831 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
1857 /* For multi devices */
1879 * on-disk superblock in the work.
1942 ((segs) << (sbi)->log_blocks_per_seg)
1944 ((blks) >> (sbi)->log_blocks_per_seg)
1946 #define BLKS_PER_SEG(sbi) ((sbi)->blocks_per_seg)
1947 #define BLKS_PER_SEC(sbi) (SEGS_TO_BLKS(sbi, (sbi)->segs_per_sec))
1948 #define SEGS_PER_SEC(sbi) ((sbi)->segs_per_sec)
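
These shift macros encode the segment geometry: with the common 4 KiB block and 2 MiB segment, log_blocks_per_seg is 9, so SEGS_TO_BLKS() is a multiply by 512 and BLKS_TO_SEGS() the matching divide. A worked standalone example (the geometry values are assumptions, not read from a real superblock):

        #include <stdio.h>

        int main(void)
        {
                /* Assumed: 4 KiB blocks, 2 MiB segments -> 512 blk/seg. */
                unsigned int log_blocks_per_seg = 9;
                unsigned int segs_per_sec = 1;  /* >1 only for large sections */
                unsigned int segs = 3;

                /* SEGS_TO_BLKS(): segs << log_blocks_per_seg */
                printf("%u segments = %u blocks\n",
                       segs, segs << log_blocks_per_seg);       /* 1536 */

                /* BLKS_PER_SEC(): SEGS_TO_BLKS(sbi, segs_per_sec) */
                printf("blocks per section = %u\n",
                       segs_per_sec << log_blocks_per_seg);     /* 512 */
                return 0;
        }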
1979 if (!ffi->inject_rate) in __time_to_inject()
1985 atomic_inc(&ffi->inject_ops); in __time_to_inject()
1986 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { in __time_to_inject()
1987 atomic_set(&ffi->inject_ops, 0); in __time_to_inject()
1988 ffi->inject_count[type]++; in __time_to_inject()
2003 * Test if the mounted volume is a multi-device volume.
2004 * - For a single regular disk volume, sbi->s_ndevs is 0.
2005 * - For a single zoned disk volume, sbi->s_ndevs is 1.
2006 * - For a multi-device volume, sbi->s_ndevs is always 2 or more.
2010 return sbi->s_ndevs > 1; in f2fs_is_multi_device()
2017 sbi->last_time[type] = now; in f2fs_update_time()
2021 sbi->last_time[DISCARD_TIME] = now; in f2fs_update_time()
2022 sbi->last_time[GC_TIME] = now; in f2fs_update_time()
2028 unsigned long interval = sbi->interval_time[type] * HZ; in f2fs_time_over()
2030 return time_after(jiffies, sbi->last_time[type] + interval); in f2fs_time_over()
2036 unsigned long interval = sbi->interval_time[type] * HZ; in f2fs_time_to_wait()
2040 delta = (sbi->last_time[type] + interval) - jiffies; in f2fs_time_to_wait()
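
f2fs_time_over() leans on the kernel's time_after(), which stays correct when jiffies wraps because it compares via a signed difference. A standalone demonstration of that core trick (the macro below is the comparison reduced to its essence, without the kernel's typecheck):

        #include <stdio.h>

        /* The kernel's wraparound-safe comparison, reduced to its core. */
        #define time_after(a, b)  ((long)((b) - (a)) < 0)

        int main(void)
        {
                unsigned long last_time = (unsigned long)-50; /* near wrap */
                unsigned long interval = 100;
                unsigned long jiffies_now = 60;               /* post-wrap */

                /* 60 is "after" the wrapped deadline of 50: prints 1. */
                printf("%d\n", time_after(jiffies_now, last_time + interval));
                return 0;
        }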
2073 return sb->s_fs_info; in F2FS_SB()
2078 return F2FS_SB(inode->i_sb); in F2FS_I_SB()
2083 return F2FS_I_SB(mapping->host); in F2FS_M_SB()
2088 return F2FS_M_SB(folio->mapping); in F2FS_F_SB()
2093 return (struct f2fs_super_block *)(sbi->raw_super); in F2FS_RAW_SUPER()
2108 return (struct f2fs_checkpoint *)(sbi->ckpt); in F2FS_CKPT()
2118 return &((struct f2fs_node *)folio_address(folio))->i; in F2FS_INODE()
2123 return (struct f2fs_nm_info *)(sbi->nm_info); in NM_I()
2128 return (struct f2fs_sm_info *)(sbi->sm_info); in SM_I()
2133 return (struct sit_info *)(SM_I(sbi)->sit_info); in SIT_I()
2138 return (struct free_segmap_info *)(SM_I(sbi)->free_info); in FREE_I()
2143 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); in DIRTY_I()
2148 return sbi->meta_inode->i_mapping; in META_MAPPING()
2153 return sbi->node_inode->i_mapping; in NODE_MAPPING()
2158 return folio->mapping == META_MAPPING(F2FS_F_SB(folio)); in is_meta_folio()
2163 return folio->mapping == NODE_MAPPING(F2FS_F_SB(folio)); in is_node_folio()
2168 return test_bit(type, &sbi->s_flag); in is_sbi_flag_set()
2173 set_bit(type, &sbi->s_flag); in set_sbi_flag()
2178 clear_bit(type, &sbi->s_flag); in clear_sbi_flag()
2183 return le64_to_cpu(cp->checkpoint_ver); in cur_cp_version()
2189 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); in f2fs_qf_ino()
2195 size_t crc_offset = le32_to_cpu(cp->checksum_offset); in cur_cp_crc()
2201 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); in __is_set_ckpt_flags()
2215 ckpt_flags = le32_to_cpu(cp->ckpt_flags); in __set_ckpt_flags()
2217 cp->ckpt_flags = cpu_to_le32(ckpt_flags); in __set_ckpt_flags()
2224 spin_lock_irqsave(&sbi->cp_lock, flags); in set_ckpt_flags()
2226 spin_unlock_irqrestore(&sbi->cp_lock, flags); in set_ckpt_flags()
2233 ckpt_flags = le32_to_cpu(cp->ckpt_flags); in __clear_ckpt_flags()
2235 cp->ckpt_flags = cpu_to_le32(ckpt_flags); in __clear_ckpt_flags()
2242 spin_lock_irqsave(&sbi->cp_lock, flags); in clear_ckpt_flags()
2244 spin_unlock_irqrestore(&sbi->cp_lock, flags); in clear_ckpt_flags()
2257 __init_rwsem(&sem->internal_rwsem, sem_name, key); in __init_f2fs_rwsem()
2259 init_waitqueue_head(&sem->read_waiters); in __init_f2fs_rwsem()
2265 return rwsem_is_locked(&sem->internal_rwsem); in f2fs_rwsem_is_locked()
2270 return rwsem_is_contended(&sem->internal_rwsem); in f2fs_rwsem_is_contended()
2276 wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem)); in f2fs_down_read()
2278 down_read(&sem->internal_rwsem); in f2fs_down_read()
2284 return down_read_trylock(&sem->internal_rwsem); in f2fs_down_read_trylock()
2289 up_read(&sem->internal_rwsem); in f2fs_up_read()
2294 down_write(&sem->internal_rwsem); in f2fs_down_write()
2300 down_read_nested(&sem->internal_rwsem, subclass); in f2fs_down_read_nested()
2305 down_write_nested(&sem->internal_rwsem, subclass); in f2fs_down_write_nested()
2314 return down_write_trylock(&sem->internal_rwsem); in f2fs_down_write_trylock()
2319 up_write(&sem->internal_rwsem); in f2fs_up_write()
2321 wake_up_all(&sem->read_waiters); in f2fs_up_write()
2331 * In order to re-enable nat_bits we need to call fsck.f2fs by in disable_nat_bits()
2337 spin_lock_irqsave(&sbi->cp_lock, flags); in disable_nat_bits()
2339 nat_bits = NM_I(sbi)->nat_bits; in disable_nat_bits()
2340 NM_I(sbi)->nat_bits = NULL; in disable_nat_bits()
2342 spin_unlock_irqrestore(&sbi->cp_lock, flags); in disable_nat_bits()
2352 return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set; in enabled_nat_bits()
2357 f2fs_down_read(&sbi->cp_rwsem); in f2fs_lock_op()
2364 return f2fs_down_read_trylock(&sbi->cp_rwsem); in f2fs_trylock_op()
2369 f2fs_up_read(&sbi->cp_rwsem); in f2fs_unlock_op()
2374 f2fs_down_write(&sbi->cp_rwsem); in f2fs_lock_all()
2379 f2fs_up_write(&sbi->cp_rwsem); in f2fs_unlock_all()
2409 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; in F2FS_HAS_BLOCKS()
2411 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; in F2FS_HAS_BLOCKS()
2441 avail_user_block_count = sbi->user_block_count - in get_available_block_count()
2442 sbi->current_reserved_blocks; in get_available_block_count()
2445 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; in get_available_block_count()
2448 if (avail_user_block_count > sbi->unusable_block_count) in get_available_block_count()
2449 avail_user_block_count -= sbi->unusable_block_count; in get_available_block_count()
2478 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); in inc_valid_block_count()
2480 spin_lock(&sbi->stat_lock); in inc_valid_block_count()
2483 diff = (long long)sbi->total_valid_block_count + *count - in inc_valid_block_count()
2487 spin_unlock(&sbi->stat_lock); in inc_valid_block_count()
2493 *count -= diff; in inc_valid_block_count()
2496 spin_unlock(&sbi->stat_lock); in inc_valid_block_count()
2500 sbi->total_valid_block_count += (block_t)(*count); in inc_valid_block_count()
2502 spin_unlock(&sbi->stat_lock); in inc_valid_block_count()
2505 percpu_counter_sub(&sbi->alloc_valid_block_count, release); in inc_valid_block_count()
2512 percpu_counter_sub(&sbi->alloc_valid_block_count, release); in inc_valid_block_count()
2515 return -ENOSPC; in inc_valid_block_count()
2521 unsigned long priv = (unsigned long)folio->private; \
2538 if (!folio->private) \
2541 v |= (unsigned long)folio->private; \
2542 folio->private = (void *)v; \
2556 unsigned long v = (unsigned long)folio->private; \
2562 folio->private = (void *)v; \
2588 unsigned long data = (unsigned long)folio->private; in folio_get_f2fs_data()
2602 folio->private = (void *)((unsigned long)folio->private | data); in folio_set_f2fs_data()
2611 spin_lock(&sbi->stat_lock); in dec_valid_block_count()
2612 if (unlikely(sbi->total_valid_block_count < count)) { in dec_valid_block_count()
2614 sbi->total_valid_block_count, inode->i_ino, count); in dec_valid_block_count()
2615 sbi->total_valid_block_count = 0; in dec_valid_block_count()
2618 sbi->total_valid_block_count -= count; in dec_valid_block_count()
2620 if (sbi->reserved_blocks && in dec_valid_block_count()
2621 sbi->current_reserved_blocks < sbi->reserved_blocks) in dec_valid_block_count()
2622 sbi->current_reserved_blocks = min(sbi->reserved_blocks, in dec_valid_block_count()
2623 sbi->current_reserved_blocks + count); in dec_valid_block_count()
2624 spin_unlock(&sbi->stat_lock); in dec_valid_block_count()
2625 if (unlikely(inode->i_blocks < sectors)) { in dec_valid_block_count()
2627 inode->i_ino, in dec_valid_block_count()
2628 (unsigned long long)inode->i_blocks, in dec_valid_block_count()
2638 atomic_inc(&sbi->nr_pages[count_type]); in inc_page_count()
2650 atomic_inc(&F2FS_I(inode)->dirty_pages); in inode_inc_dirty_pages()
2651 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? in inode_inc_dirty_pages()
2659 atomic_dec(&sbi->nr_pages[count_type]); in dec_page_count()
2664 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && in inode_dec_dirty_pages()
2665 !S_ISLNK(inode->i_mode)) in inode_dec_dirty_pages()
2668 atomic_dec(&F2FS_I(inode)->dirty_pages); in inode_dec_dirty_pages()
2669 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? in inode_dec_dirty_pages()
2681 fi->atomic_write_cnt++; in inc_atomic_write_cnt()
2682 atomic64_inc(&sbi->current_atomic_write); in inc_atomic_write_cnt()
2683 current_write = atomic64_read(&sbi->current_atomic_write); in inc_atomic_write_cnt()
2684 if (current_write > sbi->peak_atomic_write) in inc_atomic_write_cnt()
2685 sbi->peak_atomic_write = current_write; in inc_atomic_write_cnt()
2693 atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write); in release_atomic_write_cnt()
2694 fi->atomic_write_cnt = 0; in release_atomic_write_cnt()
2699 return atomic_read(&sbi->nr_pages[count_type]); in get_pages()
2704 return atomic_read(&F2FS_I(inode)->dirty_pages); in get_dirty_pages()
2709 return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1, in get_blocktype_secs()
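
The "+ BLKS_PER_SEC(sbi) - 1" before div_u64() is round-up division, so a partly used section still counts as a whole one: with 512-block sections, 1000 pending blocks give (1000 + 511) / 512 = 2 sections, not 1.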
2715 return sbi->total_valid_block_count; in valid_user_blocks()
2720 return sbi->discard_blks; in discard_blocks()
2729 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); in __bitmap_size()
2731 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); in __bitmap_size()
2738 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); in __cp_payload()
2744 void *tmp_ptr = &ckpt->sit_nat_version_bitmap; in __bitmap_ptr()
2749 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; in __bitmap_ptr()
2764 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; in __bitmap_ptr()
2771 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); in __start_cp_addr()
2773 if (sbi->cur_cp_pack == 2) in __start_cp_addr()
2780 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); in __start_cp_next_addr()
2782 if (sbi->cur_cp_pack == 1) in __start_cp_next_addr()
2789 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1; in __set_cp_next_pack()
2794 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); in __start_sum_addr()
2821 spin_lock(&sbi->stat_lock); in inc_valid_node_count()
2823 valid_block_count = sbi->total_valid_block_count + 1; in inc_valid_node_count()
2828 spin_unlock(&sbi->stat_lock); in inc_valid_node_count()
2832 avail_user_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; in inc_valid_node_count()
2835 avail_user_node_count -= F2FS_OPTION(sbi).root_reserved_nodes; in inc_valid_node_count()
2836 valid_node_count = sbi->total_valid_node_count + 1; in inc_valid_node_count()
2838 spin_unlock(&sbi->stat_lock); in inc_valid_node_count()
2842 sbi->total_valid_node_count++; in inc_valid_node_count()
2843 sbi->total_valid_block_count++; in inc_valid_node_count()
2844 spin_unlock(&sbi->stat_lock); in inc_valid_node_count()
2853 percpu_counter_inc(&sbi->alloc_valid_block_count); in inc_valid_node_count()
2863 return -ENOSPC; in inc_valid_node_count()
2869 spin_lock(&sbi->stat_lock); in dec_valid_node_count()
2871 if (unlikely(!sbi->total_valid_block_count || in dec_valid_node_count()
2872 !sbi->total_valid_node_count)) { in dec_valid_node_count()
2874 sbi->total_valid_block_count, in dec_valid_node_count()
2875 sbi->total_valid_node_count); in dec_valid_node_count()
2878 sbi->total_valid_block_count--; in dec_valid_node_count()
2879 sbi->total_valid_node_count--; in dec_valid_node_count()
2882 if (sbi->reserved_blocks && in dec_valid_node_count()
2883 sbi->current_reserved_blocks < sbi->reserved_blocks) in dec_valid_node_count()
2884 sbi->current_reserved_blocks++; in dec_valid_node_count()
2886 spin_unlock(&sbi->stat_lock); in dec_valid_node_count()
2891 if (unlikely(inode->i_blocks == 0)) { in dec_valid_node_count()
2893 inode->i_ino, in dec_valid_node_count()
2894 (unsigned long long)inode->i_blocks); in dec_valid_node_count()
2904 return sbi->total_valid_node_count; in valid_node_count()
2909 percpu_counter_inc(&sbi->total_valid_inode_count); in inc_valid_inode_count()
2914 percpu_counter_dec(&sbi->total_valid_inode_count); in dec_valid_inode_count()
2919 return percpu_counter_sum_positive(&sbi->total_valid_inode_count); in valid_inode_count()
2940 return ERR_PTR(-ENOMEM); in f2fs_grab_cache_folio()
2959 return ERR_PTR(-ENOMEM); in f2fs_filemap_get_folio()
2995 if (dn->node_folio) in f2fs_put_dnode()
2996 f2fs_folio_put(dn->node_folio, true); in f2fs_put_dnode()
2997 if (dn->inode_folio && dn->node_folio != dn->inode_folio) in f2fs_put_dnode()
2998 f2fs_folio_put(dn->inode_folio, false); in f2fs_put_dnode()
2999 dn->node_folio = NULL; in f2fs_put_dnode()
3000 dn->inode_folio = NULL; in f2fs_put_dnode()
3041 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && in is_inflight_io()
3042 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) in is_inflight_io()
3045 if (SM_I(sbi) && SM_I(sbi)->fcc_info && in is_inflight_io()
3046 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) in is_inflight_io()
3061 if (sbi->gc_mode == GC_URGENT_HIGH) in is_idle()
3064 if (sbi->bggc_io_aware == AWARE_READ_IO && is_inflight_read_io(sbi)) in is_idle()
3066 if (sbi->bggc_io_aware == AWARE_ALL_IO && is_inflight_io(sbi, type)) in is_idle()
3069 if (sbi->gc_mode == GC_URGENT_MID) in is_idle()
3072 if (sbi->gc_mode == GC_URGENT_LOW && in is_idle()
3089 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
3100 return (i->i_inline & F2FS_EXTRA_ATTR) ? in offset_in_addr()
3101 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; in offset_in_addr()
3106 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; in blkaddr_in_node()
3117 offset_in_addr(&F2FS_NODE(node_folio)->i); in get_dnode_base()
3135 return data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node); in f2fs_data_blkaddr()
3143 mask = BIT(7 - (nr & 0x07)); in f2fs_test_bit()
3152 mask = BIT(7 - (nr & 0x07)); in f2fs_set_bit()
3161 mask = BIT(7 - (nr & 0x07)); in f2fs_clear_bit()
3171 mask = BIT(7 - (nr & 0x07)); in f2fs_test_and_set_bit()
3183 mask = BIT(7 - (nr & 0x07)); in f2fs_test_and_clear_bit()
3194 mask = BIT(7 - (nr & 0x07)); in f2fs_change_bit()
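
All six helpers above share the same addressing: bit nr lives in byte nr >> 3, under the mask BIT(7 - (nr & 0x07)), i.e. most-significant bit first within each byte, matching the on-disk bitmap layout. A standalone userspace copy of the test helper:

        #include <stdio.h>

        #define BIT(n) (1U << (n))

        /* Userspace copy of the f2fs bitmap addressing shown above. */
        static int f2fs_test_bit(unsigned int nr, const unsigned char *addr)
        {
                unsigned int mask;

                addr += (nr >> 3);              /* byte holding bit nr */
                mask = BIT(7 - (nr & 0x07));    /* MSB-first within byte */
                return mask & *addr;
        }

        int main(void)
        {
                unsigned char bitmap[2] = { 0x80, 0x01 }; /* bits 0, 15 set */

                printf("%d %d %d\n",
                       !!f2fs_test_bit(0, bitmap),      /* 1 */
                       !!f2fs_test_bit(7, bitmap),      /* 0 */
                       !!f2fs_test_bit(15, bitmap));    /* 1 */
                return 0;
        }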
3199 * On-disk inode flags (f2fs_inode::i_flags)
3208 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */
3221 /* Flags that are appropriate for regular files (all but dir-specific ones). */
3225 /* Flags that are appropriate for inodes that are neither directories nor regular files. */
3228 #define IS_DEVICE_ALIASING(inode) (F2FS_I(inode)->i_flags & F2FS_DEVICE_ALIAS_FL)
3260 set_bit(flag, F2FS_I(inode)->flags); in set_inode_flag()
3266 return test_bit(flag, F2FS_I(inode)->flags); in is_inode_flag_set()
3271 clear_bit(flag, F2FS_I(inode)->flags); in clear_inode_flag()
3283 F2FS_I(inode)->i_acl_mode = mode; in set_acl_inode()
3340 F2FS_I(inode)->i_current_depth = depth; in f2fs_i_depth_write()
3347 F2FS_I(inode)->i_gc_failures = count; in f2fs_i_gc_failures_write()
3353 F2FS_I(inode)->i_xattr_nid = xnid; in f2fs_i_xnid_write()
3359 F2FS_I(inode)->i_pino = pino; in f2fs_i_pino_write()
3367 if (ri->i_inline & F2FS_INLINE_XATTR) in get_inline_info()
3368 set_bit(FI_INLINE_XATTR, fi->flags); in get_inline_info()
3369 if (ri->i_inline & F2FS_INLINE_DATA) in get_inline_info()
3370 set_bit(FI_INLINE_DATA, fi->flags); in get_inline_info()
3371 if (ri->i_inline & F2FS_INLINE_DENTRY) in get_inline_info()
3372 set_bit(FI_INLINE_DENTRY, fi->flags); in get_inline_info()
3373 if (ri->i_inline & F2FS_DATA_EXIST) in get_inline_info()
3374 set_bit(FI_DATA_EXIST, fi->flags); in get_inline_info()
3375 if (ri->i_inline & F2FS_EXTRA_ATTR) in get_inline_info()
3376 set_bit(FI_EXTRA_ATTR, fi->flags); in get_inline_info()
3377 if (ri->i_inline & F2FS_PIN_FILE) in get_inline_info()
3378 set_bit(FI_PIN_FILE, fi->flags); in get_inline_info()
3379 if (ri->i_inline & F2FS_COMPRESS_RELEASED) in get_inline_info()
3380 set_bit(FI_COMPRESS_RELEASED, fi->flags); in get_inline_info()
3385 ri->i_inline = 0; in set_raw_inline()
3388 ri->i_inline |= F2FS_INLINE_XATTR; in set_raw_inline()
3390 ri->i_inline |= F2FS_INLINE_DATA; in set_raw_inline()
3392 ri->i_inline |= F2FS_INLINE_DENTRY; in set_raw_inline()
3394 ri->i_inline |= F2FS_DATA_EXIST; in set_raw_inline()
3396 ri->i_inline |= F2FS_EXTRA_ATTR; in set_raw_inline()
3398 ri->i_inline |= F2FS_PIN_FILE; in set_raw_inline()
3400 ri->i_inline |= F2FS_COMPRESS_RELEASED; in set_raw_inline()
3415 return S_ISREG(inode->i_mode) && in f2fs_compressed_file()
3438 unsigned int addrs = is_inode ? (CUR_ADDRS_PER_INODE(inode) - in addrs_per_page()
3442 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); in addrs_per_page()
3451 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - in inline_xattr_addr()
3510 return F2FS_I(inode)->i_advise & type; in is_file()
3517 F2FS_I(inode)->i_advise |= type; in set_file()
3525 F2FS_I(inode)->i_advise &= ~type; in clear_file()
3533 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts)) in f2fs_is_time_consistent()
3536 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts)) in f2fs_is_time_consistent()
3539 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts)) in f2fs_is_time_consistent()
3551 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_skip_inode_update()
3552 ret = list_empty(&F2FS_I(inode)->gdirty_list); in f2fs_skip_inode_update()
3553 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_skip_inode_update()
3564 spin_lock(&F2FS_I(inode)->i_size_lock); in f2fs_skip_inode_update()
3565 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); in f2fs_skip_inode_update()
3566 spin_unlock(&F2FS_I(inode)->i_size_lock); in f2fs_skip_inode_update()
3634 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); in get_extra_isize()
3639 return F2FS_I(inode)->i_inline_xattr_size; in get_inline_xattr_addrs()
3644 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3649 (offsetof(struct f2fs_inode, i_extra_end) - \
3655 sizeof((f2fs_inode)->field)) \
3660 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3795 return -ENOKEY; in f2fs_add_link()
3796 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, in f2fs_add_link()
3797 inode, inode->i_ino, inode->i_mode); in f2fs_add_link()
3970 return fio->folio->mapping->host; in fio_inode()
4206 return (struct f2fs_stat_info *)sbi->stat_info; in F2FS_STAT()
4210 atomic_inc(&sbi->cp_call_count[(foreground)])
4211 #define stat_inc_cp_count(sbi) (F2FS_STAT(sbi)->cp_count++)
4212 #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
4213 #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
4214 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
4215 #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
4216 #define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type]))
4217 #define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type]))
4218 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
4219 #define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type]))
4223 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \
4228 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \
4233 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \
4238 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \
4243 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \
4248 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
4253 (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \
4258 (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \
4261 (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
4263 (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
4265 (atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
4267 (atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
4269 (atomic_inc(&F2FS_I_SB(inode)->atomic_files))
4271 (atomic_dec(&F2FS_I_SB(inode)->atomic_files))
4274 if (blkaddr < SIT_I(sbi)->sit_base_addr) \
4275 atomic_inc(&(sbi)->meta_count[META_CP]); \
4276 else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
4277 atomic_inc(&(sbi)->meta_count[META_SIT]); \
4278 else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
4279 atomic_inc(&(sbi)->meta_count[META_NAT]); \
4280 else if (blkaddr < SM_I(sbi)->main_blkaddr) \
4281 atomic_inc(&(sbi)->meta_count[META_SSA]); \
4284 ((sbi)->segment_count[(curseg)->alloc_type]++)
4286 ((sbi)->block_count[(curseg)->alloc_type]++)
4288 (atomic_inc(&(sbi)->inplace_count))
4291 int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \
4292 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
4294 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
4297 (F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
4299 (F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
4301 (F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)
4304 ((si)->tot_blks += (blks))
4310 si->data_blks += (blks); \
4311 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
4318 si->node_blks += (blks); \
4319 si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
4479 return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); in f2fs_encrypted_file()
4515 CLUSTER_IS_COMPR, /* check only whether the cluster is compressed */
4516 CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
4517 CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */
4576 sbi->compr_new_inode++; \
4581 int diff = F2FS_I(inode)->i_cluster_size - blocks; \
4582 sbi->compr_written_block += blocks; \
4583 sbi->compr_saved_block += diff; \
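
Worked example of the accounting above: a 16-block cluster (i_cluster_size = 16) whose data compresses into 5 blocks adds 5 to compr_written_block and diff = 16 - 5 = 11 to compr_saved_block.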
4598 return ERR_PTR(-EINVAL); in f2fs_compress_control_folio()
4647 fi->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm; in set_compress_context()
4648 fi->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size; in set_compress_context()
4649 fi->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ? in set_compress_context()
4651 fi->i_cluster_size = BIT(fi->i_log_cluster_size); in set_compress_context()
4652 if ((fi->i_compress_algorithm == COMPRESS_LZ4 || in set_compress_context()
4653 fi->i_compress_algorithm == COMPRESS_ZSTD) && in set_compress_context()
4655 fi->i_compress_level = F2FS_OPTION(sbi).compress_level; in set_compress_context()
4656 fi->i_flags |= F2FS_COMPR_FL; in set_compress_context()
4663 return -EOPNOTSUPP; in set_compress_context()
4671 f2fs_down_write(&fi->i_sem); in f2fs_disable_compressed_file()
4674 f2fs_up_write(&fi->i_sem); in f2fs_disable_compressed_file()
4678 (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) { in f2fs_disable_compressed_file()
4679 f2fs_up_write(&fi->i_sem); in f2fs_disable_compressed_file()
4683 fi->i_flags &= ~F2FS_COMPR_FL; in f2fs_disable_compressed_file()
4688 f2fs_up_write(&fi->i_sem); in f2fs_disable_compressed_file()
4724 return f2fs_zone_is_seq(sbi, devi, blkaddr / sbi->blocks_per_blkz); in f2fs_blkz_is_seq()
4736 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_bdev_index()
4741 return -1; in f2fs_bdev_index()
4759 return f2fs_bdev_support_discard(sbi->sb->s_bdev); in f2fs_hw_support_discard()
4761 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_hw_support_discard()
4778 return bdev_read_only(sbi->sb->s_bdev); in f2fs_hw_is_readonly()
4780 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_hw_is_readonly()
4812 blkaddr -= FDEV(devi).start_blk; in f2fs_is_sequential_zone_area()
4834 return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode); in f2fs_may_compress()
4841 int diff = fi->i_cluster_size - blocks; in f2fs_i_compr_blocks_update()
4844 if (!add && !atomic_read(&fi->i_compr_blocks)) in f2fs_i_compr_blocks_update()
4848 atomic_add(diff, &fi->i_compr_blocks); in f2fs_i_compr_blocks_update()
4851 atomic_sub(diff, &fi->i_compr_blocks); in f2fs_i_compr_blocks_update()
4864 return sbi->aligned_blksize; in f2fs_allow_multi_device_dio()
4870 idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); in f2fs_need_verity()
4918 timeout -= DEFAULT_IO_TIMEOUT; in f2fs_io_schedule_timeout_killable()
4925 pgoff_t ofs = folio->index; in f2fs_handle_page_eio()
4930 if (ofs == sbi->page_eio_ofs[type]) { in f2fs_handle_page_eio()
4931 if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO) in f2fs_handle_page_eio()
4934 sbi->page_eio_ofs[type] = ofs; in f2fs_handle_page_eio()
4935 sbi->page_eio_cnt[type] = 0; in f2fs_handle_page_eio()
4941 return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb); in f2fs_is_readonly()
4962 f2fs_submit_merged_write_cond(sbi, sbi->meta_inode, in f2fs_truncate_meta_inode_pages()
4967 F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1))); in f2fs_truncate_meta_inode_pages()