Searched refs: node_folio (Results 1 – 11 of 11) sorted by relevance

/linux/fs/f2fs/
inode.c
71 static void __get_inode_rdev(struct inode *inode, struct folio *node_folio) in __get_inode_rdev() argument
73 __le32 *addr = get_dnode_addr(inode, node_folio); in __get_inode_rdev()
84 static void __set_inode_rdev(struct inode *inode, struct folio *node_folio) in __set_inode_rdev() argument
86 __le32 *addr = get_dnode_addr(inode, node_folio); in __set_inode_rdev()
270 static bool sanity_check_inode(struct inode *inode, struct folio *node_folio) in sanity_check_inode() argument
274 struct f2fs_inode *ri = F2FS_INODE(node_folio); in sanity_check_inode()
277 iblocks = le64_to_cpu(F2FS_INODE(node_folio)->i_blocks); in sanity_check_inode()
284 if (ino_of_node(node_folio) != nid_of_node(node_folio)) { in sanity_check_inode()
287 ino_of_node(node_folio), nid_of_node(node_folio)); in sanity_check_inode()
291 if (ino_of_node(node_folio) == fi->i_xattr_nid) { in sanity_check_inode()
[all …]
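For context, a minimal sketch of the rdev helper the inode.c hits point at, assuming the usual f2fs convention of storing a device inode's numbers in the first two raw address slots; treat it as an illustration of the pattern, not the verbatim mainline body:

/* Sketch: read a device inode's rdev out of the address slots
 * reached through its node folio. */
static void __get_inode_rdev(struct inode *inode, struct folio *node_folio)
{
        __le32 *addr = get_dnode_addr(inode, node_folio);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
            S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                if (addr[0])
                        inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
                else
                        inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
        }
}
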
node.h
247 static inline nid_t ino_of_node(const struct folio *node_folio) in ino_of_node() argument
249 struct f2fs_node *rn = F2FS_NODE(node_folio); in ino_of_node()
253 static inline nid_t nid_of_node(const struct folio *node_folio) in nid_of_node() argument
255 struct f2fs_node *rn = F2FS_NODE(node_folio); in nid_of_node()
259 static inline unsigned int ofs_of_node(const struct folio *node_folio) in ofs_of_node() argument
261 struct f2fs_node *rn = F2FS_NODE(node_folio); in ofs_of_node()
266 static inline __u64 cpver_of_node(const struct folio *node_folio) in cpver_of_node() argument
268 struct f2fs_node *rn = F2FS_NODE(node_folio); in cpver_of_node()
272 static inline block_t next_blkaddr_of_node(const struct folio *node_folio) in next_blkaddr_of_node() argument
274 struct f2fs_node *rn = F2FS_NODE(node_folio); in next_blkaddr_of_node()
[all …]
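A sketch of the accessor pattern behind these node.h hits: each helper maps the node folio to its struct f2fs_node and returns one field of the on-disk node footer (the footer field names are assumed from the f2fs disk layout, not shown in this listing):

static inline nid_t ino_of_node(const struct folio *node_folio)
{
        struct f2fs_node *rn = F2FS_NODE(node_folio);

        /* inode number that owns this node block */
        return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(const struct folio *node_folio)
{
        struct f2fs_node *rn = F2FS_NODE(node_folio);

        /* node id of this node block itself */
        return le32_to_cpu(rn->footer.nid);
}
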
recovery.c
500 struct folio *sum_folio, *node_folio; in check_index_in_prev_nodes() local
533 max_addrs = ADDRS_PER_PAGE(dn->node_folio, dn->inode); in check_index_in_prev_nodes()
545 tdn.node_folio = dn->inode_folio; in check_index_in_prev_nodes()
554 node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR); in check_index_in_prev_nodes()
555 if (IS_ERR(node_folio)) in check_index_in_prev_nodes()
556 return PTR_ERR(node_folio); in check_index_in_prev_nodes()
558 offset = ofs_of_node(node_folio); in check_index_in_prev_nodes()
559 ino = ino_of_node(node_folio); in check_index_in_prev_nodes()
560 f2fs_folio_put(node_folio, true); in check_index_in_prev_nodes()
668 f2fs_folio_wait_writeback(dn.node_folio, NODE, true, true); in do_recover_data()
[all …]
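Condensed from the check_index_in_prev_nodes() hits above, the lookup-and-release idiom around a node folio; the comment on the put flag reflects the common f2fs convention and is an assumption here:

node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
if (IS_ERR(node_folio))
        return PTR_ERR(node_folio);

offset = ofs_of_node(node_folio);       /* node offset inside its inode */
ino = ino_of_node(node_folio);          /* owning inode number */
f2fs_folio_put(node_folio, true);       /* true: unlock before dropping the ref */
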
gc.c
1054 struct folio *node_folio; in gc_node_segment() local
1077 node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR); in gc_node_segment()
1078 if (IS_ERR(node_folio)) in gc_node_segment()
1083 f2fs_folio_put(node_folio, true); in gc_node_segment()
1088 f2fs_folio_put(node_folio, true); in gc_node_segment()
1093 f2fs_folio_put(node_folio, true); in gc_node_segment()
1097 err = f2fs_move_node_folio(node_folio, gc_type); in gc_node_segment()
1143 struct folio *node_folio; in is_alive() local
1151 node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR); in is_alive()
1152 if (IS_ERR(node_folio)) in is_alive()
[all …]
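The gc_node_segment() hits reduce to the per-nid step sketched below. gc_one_node() is a hypothetical wrapper used only for illustration; mainline keeps this logic inline in the segment loop, and the re-validation checks are elided because they do not appear in this listing:

/* Hypothetical helper: resolve one nid to its node folio, drop it on
 * any failed re-check, otherwise hand it to the migration helper. */
static int gc_one_node(struct f2fs_sb_info *sbi, nid_t nid, int gc_type)
{
        struct folio *node_folio;

        node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
        if (IS_ERR(node_folio))
                return PTR_ERR(node_folio);

        /* ... re-validation checks; each failure path calls
         *     f2fs_folio_put(node_folio, true) and bails out ... */

        return f2fs_move_node_folio(node_folio, gc_type);
}
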
node.c
894 dn->node_folio = nfolio[level]; in f2fs_get_dnode_of_data()
915 blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node); in f2fs_get_dnode_of_data()
917 blkaddr = data_blkaddr(dn->inode, dn->node_folio, in f2fs_get_dnode_of_data()
932 dn->node_folio = NULL; in f2fs_get_dnode_of_data()
973 clear_node_folio_dirty(dn->node_folio); in truncate_node()
976 index = dn->node_folio->index; in truncate_node()
977 f2fs_folio_put(dn->node_folio, true); in truncate_node()
982 dn->node_folio = NULL; in truncate_node()
1014 dn->node_folio = folio; in truncate_dnode()
1087 dn->node_folio = folio; in truncate_nodes()
[all …]
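Pieced together from the node.c hits, the life cycle of dn->node_folio: a successful lookup caches the direct node folio in the dnode_of_data, and the truncation path cleans and releases it before clearing the pointer. The two halves below come from different functions and are only juxtaposed here:

/* lookup side, f2fs_get_dnode_of_data(): cache the resolved folio and
 * read the data block address it covers */
dn->node_folio = nfolio[level];
blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node);

/* teardown side, truncate_node(): clean, remember the index, release
 * the reference, and poison the pointer */
clear_node_folio_dirty(dn->node_folio);
index = dn->node_folio->index;
f2fs_folio_put(dn->node_folio, true);
dn->node_folio = NULL;
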
f2fs.h
1083 struct folio *node_folio; /* cached direct node folio */ member
1099 dn->node_folio = nfolio; in set_new_dnode()
3004 if (dn->node_folio) in f2fs_put_dnode()
3005 f2fs_folio_put(dn->node_folio, true); in f2fs_put_dnode()
3006 if (dn->inode_folio && dn->node_folio != dn->inode_folio) in f2fs_put_dnode()
3008 dn->node_folio = NULL; in f2fs_put_dnode()
3120 struct folio *node_folio) in get_dnode_base() argument
3122 if (!IS_INODE(node_folio)) in get_dnode_base()
3126 offset_in_addr(&F2FS_NODE(node_folio)->i); in get_dnode_base()
3130 struct folio *node_folio) in get_dnode_addr() argument
[all …]
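The f2fs.h hits describe the cached folio pair inside struct dnode_of_data and its release helper. A sketch of f2fs_put_dnode() consistent with those lines; the unlock flag on the inode_folio put is inferred by symmetry and may differ in mainline:

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
        /* drop the direct node folio, unlocking it first */
        if (dn->node_folio)
                f2fs_folio_put(dn->node_folio, true);
        /* drop the inode folio too, unless both point at the same folio */
        if (dn->inode_folio && dn->node_folio != dn->inode_folio)
                f2fs_folio_put(dn->inode_folio, false);
        dn->node_folio = NULL;
        dn->inode_folio = NULL;
}
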
compress.c
910 block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio, in f2fs_sanity_check_cluster()
951 block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio, in __f2fs_get_cluster_blocks()
1323 if (data_blkaddr(dn.inode, dn.node_folio, in f2fs_write_compressed_pages()
1355 fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_folio, in f2fs_write_compressed_pages()
1888 bool compressed = data_blkaddr(dn->inode, dn->node_folio, in f2fs_cluster_blocks_are_contiguous()
1891 block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio, in f2fs_cluster_blocks_are_contiguous()
1895 block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio, in f2fs_cluster_blocks_are_contiguous()
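Every compress.c hit funnels through data_blkaddr(). Assuming the helper simply indexes the dnode's raw address array, which is what the get_dnode_addr() hits in f2fs.h suggest, it reduces to a sketch like:

static inline block_t data_blkaddr(struct inode *inode,
                                   struct folio *node_folio,
                                   unsigned int offset)
{
        /* address slot 'offset' of the dnode held in node_folio */
        return le32_to_cpu(*(get_dnode_addr(inode, node_folio) + offset));
}
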
file.c
428 block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio, in __found_offset()
498 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_seek_block()
656 addr = get_dnode_addr(dn->inode, dn->node_folio) + ofs; in f2fs_truncate_data_blocks_range()
720 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio), in f2fs_truncate_data_blocks_range()
827 count = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_do_truncate_blocks()
832 if (dn.ofs_in_node || IS_INODE(dn.node_folio)) { in f2fs_do_truncate_blocks()
1254 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_truncate_hole()
1351 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, inode) - in __read_out_blkaddrs()
1440 ADDRS_PER_PAGE(dn.node_folio, dst_inode) - in __clone_blkaddrs()
1737 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_zero_range()
[all …]
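The recurring idiom in the file.c hits: ADDRS_PER_PAGE() bounds how many data block addresses the current node folio covers, and get_dnode_addr() yields the slot array to walk. A hypothetical condensed loop in that shape, not a copy of any one function:

unsigned int count = ADDRS_PER_PAGE(dn->node_folio, inode);
__le32 *addr = get_dnode_addr(dn->inode, dn->node_folio) + dn->ofs_in_node;

for (; dn->ofs_in_node < count; dn->ofs_in_node++, addr++) {
        block_t blkaddr = le32_to_cpu(*addr);

        /* ... invalidate, copy or zero blkaddr here ... */
}
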
data.c
1109 __le32 *addr = get_dnode_addr(dn->inode, dn->node_folio); in __set_data_blkaddr()
1123 f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true); in f2fs_set_data_blkaddr()
1125 if (folio_mark_dirty(dn->node_folio)) in f2fs_set_data_blkaddr()
1153 f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true); in f2fs_reserve_new_blocks()
1164 if (folio_mark_dirty(dn->node_folio)) in f2fs_reserve_new_blocks()
1592 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_map_blocks()
2241 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio, in f2fs_read_multi_pages()
2275 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio, in f2fs_read_multi_pages()
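The data.c hits show the write path for a block address: wait out any writeback of the node folio, rewrite the slot, and redirty the folio so the change reaches disk. A sketch stitched from those lines; dn->data_blkaddr and dn->node_changed are taken from the surrounding f2fs code rather than from this listing:

f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);

/* __set_data_blkaddr(): store the new address into the dnode slot */
__le32 *addr = get_dnode_addr(dn->inode, dn->node_folio);
addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);

/* redirty the node folio so the updated address is written back */
if (folio_mark_dirty(dn->node_folio))
        dn->node_changed = true;
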
extent_cache.c
952 ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio), dn->inode) + in __update_extent_cache()
segment.c
337 blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, cow_inode), in __f2fs_commit_atomic_write()