Lines matching refs:sbi (fs/f2fs/node.c). Each entry gives the source line
number, the matching line, the enclosing function, and whether sbi is that
function's argument or a local variable there; continuation lines that do not
mention sbi are omitted.

30 static inline bool is_invalid_nid(struct f2fs_sb_info *sbi, nid_t nid)  in is_invalid_nid()  argument
32 return nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid; in is_invalid_nid()
38 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_check_nid_range() argument
40 if (unlikely(is_invalid_nid(sbi, nid))) { in f2fs_check_nid_range()
41 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_check_nid_range()
42 f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.", in f2fs_check_nid_range()
44 f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE); in f2fs_check_nid_range()
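
Reassembled from the fragments above, the nid range check reads roughly as
follows; the __func__ argument, the braces, and the -EFSCORRUPTED/0 return
values are inferred glue that the listing does not show:

    static inline bool is_invalid_nid(struct f2fs_sb_info *sbi, nid_t nid)
    {
            return nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid;
    }

    int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
    {
            if (unlikely(is_invalid_nid(sbi, nid))) {
                    set_sbi_flag(sbi, SBI_NEED_FSCK);
                    f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
                              __func__, nid);
                    f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
                    return -EFSCORRUPTED;   /* error value assumed */
            }
            return 0;
    }
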
50 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) in f2fs_available_free_memory() argument
52 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_available_free_memory()
53 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_available_free_memory()
78 if (excess_cached_nats(sbi)) in f2fs_available_free_memory()
81 if (sbi->sb->s_bdi->wb.dirty_exceeded) in f2fs_available_free_memory()
83 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); in f2fs_available_free_memory()
89 mem_size += sbi->im[i].ino_num * in f2fs_available_free_memory()
96 struct extent_tree_info *eti = &sbi->extent_tree[etype]; in f2fs_available_free_memory()
115 res = (free_ram > avail_ram * sbi->compress_watermark / 100) && in f2fs_available_free_memory()
116 (COMPRESS_MAPPING(sbi)->nrpages < in f2fs_available_free_memory()
117 free_ram * sbi->compress_percent / 100); in f2fs_available_free_memory()
122 if (!sbi->sb->s_bdi->wb.dirty_exceeded) in f2fs_available_free_memory()
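
The branches of f2fs_available_free_memory() above all apply one watermark
pattern: a cache may keep growing only while its footprint stays under a
percentage of available RAM. A minimal illustrative helper, with hypothetical
names (the per-type branch layout and thresholds are not shown here):

    /* Hypothetical sketch of the recurring watermark test: allow more
     * cache only while mem_size is below ram_thresh percent of RAM. */
    static bool mem_within_watermark(unsigned long mem_size,
                                     unsigned long avail_ram,
                                     unsigned int ram_thresh)
    {
            return mem_size < (avail_ram * ram_thresh / 100);
    }
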
138 static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_folio() argument
140 return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid)); in get_current_nat_folio()
143 static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid) in get_next_nat_folio() argument
150 struct f2fs_nm_info *nm_i = NM_I(sbi); in get_next_nat_folio()
152 dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid)); in get_next_nat_folio()
155 src_folio = get_current_nat_folio(sbi, nid); in get_next_nat_folio()
158 dst_folio = f2fs_grab_meta_folio(sbi, dst_off); in get_next_nat_folio()
159 f2fs_bug_on(sbi, folio_test_dirty(src_folio)); in get_next_nat_folio()
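
get_next_nat_folio() implements the NAT block ping-pong: each NAT block has
two on-disk slots, and a checkpoint writes the updated block to the slot that
next_nat_addr() selects. A sketch of the tail of the function under that
reading; the copy/dirty/put sequence and the set_to_next_nat() call are
inferred and may differ in detail:

    /* copy the committed NAT block into its alternate slot */
    memcpy(folio_address(dst_folio), folio_address(src_folio),
           folio_size(dst_folio));
    folio_mark_dirty(dst_folio);
    f2fs_folio_put(src_folio, true);        /* release helper assumed */

    set_to_next_nat(nm_i, nid);             /* flip the version bit */
    return dst_folio;
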
172 static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi, in __alloc_nat_entry() argument
178 GFP_F2FS_ZERO, no_fail, sbi); in __alloc_nat_entry()
328 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio) in f2fs_in_warm_node_list() argument
333 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi) in f2fs_init_fsync_node_info() argument
335 spin_lock_init(&sbi->fsync_node_lock); in f2fs_init_fsync_node_info()
336 INIT_LIST_HEAD(&sbi->fsync_node_list); in f2fs_init_fsync_node_info()
337 sbi->fsync_seg_id = 0; in f2fs_init_fsync_node_info()
338 sbi->fsync_node_num = 0; in f2fs_init_fsync_node_info()
341 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi, in f2fs_add_fsync_node_entry() argument
355 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_add_fsync_node_entry()
356 list_add_tail(&fn->list, &sbi->fsync_node_list); in f2fs_add_fsync_node_entry()
357 fn->seq_id = sbi->fsync_seg_id++; in f2fs_add_fsync_node_entry()
359 sbi->fsync_node_num++; in f2fs_add_fsync_node_entry()
360 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_add_fsync_node_entry()
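
Pieced together, f2fs_add_fsync_node_entry() allocates a tracking entry, pins
the folio, and hands back a monotonically increasing sequence id under
fsync_node_lock. Only the locked section appears above; the allocation and
folio pinning lines are assumptions:

    struct fsync_node_entry *fn;
    unsigned long flags;
    unsigned int seq_id;

    fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS,
                               true, NULL);         /* allocation assumed */
    folio_get(folio);                               /* pin assumed */
    fn->folio = folio;
    INIT_LIST_HEAD(&fn->list);

    spin_lock_irqsave(&sbi->fsync_node_lock, flags);
    list_add_tail(&fn->list, &sbi->fsync_node_list);
    fn->seq_id = sbi->fsync_seg_id++;
    seq_id = fn->seq_id;
    sbi->fsync_node_num++;
    spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

    return seq_id;
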
365 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio) in f2fs_del_fsync_node_entry() argument
370 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_del_fsync_node_entry()
371 list_for_each_entry(fn, &sbi->fsync_node_list, list) { in f2fs_del_fsync_node_entry()
374 sbi->fsync_node_num--; in f2fs_del_fsync_node_entry()
375 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_del_fsync_node_entry()
381 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_del_fsync_node_entry()
382 f2fs_bug_on(sbi, 1); in f2fs_del_fsync_node_entry()
385 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi) in f2fs_reset_fsync_node_info() argument
389 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_reset_fsync_node_info()
390 sbi->fsync_seg_id = 0; in f2fs_reset_fsync_node_info()
391 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_reset_fsync_node_info()
394 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_need_dentry_mark() argument
396 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_need_dentry_mark()
411 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_is_checkpointed_node() argument
413 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_is_checkpointed_node()
425 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) in f2fs_need_inode_block_update() argument
427 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_need_inode_block_update()
442 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, in cache_nat_entry() argument
445 struct f2fs_nm_info *nm_i = NM_I(sbi); in cache_nat_entry()
449 if (f2fs_rwsem_is_locked(&sbi->cp_global_sem)) in cache_nat_entry()
452 new = __alloc_nat_entry(sbi, nid, false); in cache_nat_entry()
461 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || in cache_nat_entry()
470 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, in set_node_addr() argument
473 struct f2fs_nm_info *nm_i = NM_I(sbi); in set_node_addr()
475 struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true); in set_node_addr()
484 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); in set_node_addr()
492 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); in set_node_addr()
499 f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr); in set_node_addr()
500 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR && in set_node_addr()
502 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR && in set_node_addr()
504 f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) && in set_node_addr()
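
The truncated f2fs_bug_on() lines above encode the legal NAT state
transitions; their right-hand sides do not mention sbi and so are elided by
the listing. A reconstructed reading (the elided conditions are inferred):

    /* Sanity checks in set_node_addr(), as the fragments suggest:
     *   - a cached entry must agree with the caller's node_info
     *     (nat_get_blkaddr(e) == ni->blk_addr);
     *   - NULL_ADDR -> NULL_ADDR is illegal (double free);
     *   - NEW_ADDR  -> NEW_ADDR is illegal (double allocation);
     *   - a valid block address may never fall back to NEW_ADDR.
     */
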
531 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) in f2fs_try_to_free_nats() argument
533 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_try_to_free_nats()
562 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, in f2fs_get_node_info() argument
565 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_get_node_info()
566 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in f2fs_get_node_info()
601 if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) { in f2fs_get_node_info()
621 index = current_nat_addr(sbi, nid); in f2fs_get_node_info()
624 folio = f2fs_get_meta_folio(sbi, index); in f2fs_get_node_info()
634 !f2fs_is_valid_blkaddr(sbi, ni->blk_addr, in f2fs_get_node_info()
636 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_get_node_info()
637 f2fs_err_ratelimited(sbi, in f2fs_get_node_info()
642 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT); in f2fs_get_node_info()
648 cache_nat_entry(sbi, nid, &ne); in f2fs_get_node_info()
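
The fragments above trace the three-level lookup in f2fs_get_node_info();
summarizing the flow they imply (ordering inferred from the line numbers):

    /* Lookup order in f2fs_get_node_info():
     *   1. the in-memory NAT cache (nat_entry tree);
     *   2. the NAT journal carried in the CURSEG_HOT_DATA summary;
     *   3. the on-disk NAT block: current_nat_addr(sbi, nid) ->
     *      f2fs_get_meta_folio(sbi, index).
     * A result found in (2) or (3) is validated with
     * f2fs_is_valid_blkaddr(); an invalid address flags the fs for
     * fsck and fails with ERROR_INCONSISTENT_NAT, otherwise the entry
     * is inserted back into the cache via cache_nat_entry().
     */
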
657 struct f2fs_sb_info *sbi = F2FS_F_SB(parent); in f2fs_ra_node_pages() local
669 f2fs_ra_node_page(sbi, nid); in f2fs_ra_node_pages()
795 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_get_dnode_of_data() local
811 nfolio[0] = f2fs_get_inode_folio(sbi, nids[0]); in f2fs_get_dnode_of_data()
837 f2fs_err_ratelimited(sbi, in f2fs_get_dnode_of_data()
841 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_get_dnode_of_data()
847 if (!f2fs_alloc_nid(sbi, &(nids[i]))) { in f2fs_get_dnode_of_data()
855 f2fs_alloc_nid_failed(sbi, nids[i]); in f2fs_get_dnode_of_data()
861 f2fs_alloc_nid_done(sbi, nids[i]); in f2fs_get_dnode_of_data()
879 nfolio[i] = f2fs_get_node_folio(sbi, nids[i], in f2fs_get_dnode_of_data()
898 f2fs_sb_has_readonly(sbi)) { in f2fs_get_dnode_of_data()
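
The nids[i] handling above shows the free-nid lifecycle as its consumers use
it. A condensed usage sketch (error values and the surrounding control flow
are assumptions):

    nid_t nid;
    struct folio *folio;

    if (!f2fs_alloc_nid(sbi, &nid))         /* reserve: FREE_NID -> PREALLOC_NID */
            return -ENOSPC;                 /* error value assumed */

    folio = f2fs_new_node_folio(dn, ofs);   /* consume the reservation */
    if (IS_ERR(folio)) {
            f2fs_alloc_nid_failed(sbi, nid);        /* put the nid back */
            return PTR_ERR(folio);
    }
    f2fs_alloc_nid_done(sbi, nid);          /* drop the PREALLOC entry */
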
943 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in truncate_node() local
948 err = f2fs_get_node_info(sbi, dn->nid, &ni, false); in truncate_node()
953 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) { in truncate_node()
954 f2fs_err_ratelimited(sbi, in truncate_node()
957 set_sbi_flag(sbi, SBI_NEED_FSCK); in truncate_node()
958 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT); in truncate_node()
963 f2fs_invalidate_blocks(sbi, ni.blk_addr, 1); in truncate_node()
964 dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino); in truncate_node()
965 set_node_addr(sbi, &ni, NULL_ADDR, false); in truncate_node()
968 f2fs_remove_orphan_inode(sbi, dn->nid); in truncate_node()
969 dec_valid_inode_count(sbi); in truncate_node()
974 set_sbi_flag(sbi, SBI_IS_DIRTY); in truncate_node()
979 invalidate_mapping_pages(NODE_MAPPING(sbi), in truncate_node()
990 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in truncate_dnode() local
998 folio = f2fs_get_node_folio(sbi, dn->nid, NODE_TYPE_NON_INODE); in truncate_dnode()
1005 f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u", in truncate_dnode()
1007 set_sbi_flag(sbi, SBI_NEED_FSCK); in truncate_dnode()
1008 f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE); in truncate_dnode()
1172 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_truncate_inode_blocks() local
1186 f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u", in f2fs_truncate_inode_blocks()
1189 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_truncate_inode_blocks()
1195 folio = f2fs_get_inode_folio(sbi, inode->i_ino); in f2fs_truncate_inode_blocks()
1255 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); in f2fs_truncate_inode_blocks()
1256 f2fs_err_ratelimited(sbi, in f2fs_truncate_inode_blocks()
1284 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_truncate_xattr_node() local
1293 nfolio = f2fs_get_xnode_folio(sbi, nid); in f2fs_truncate_xattr_node()
1370 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_new_node_folio() local
1378 folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false); in f2fs_new_node_folio()
1382 if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs)))) in f2fs_new_node_folio()
1386 err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false); in f2fs_new_node_folio()
1388 dec_valid_node_count(sbi, dn->inode, !ofs); in f2fs_new_node_folio()
1393 dec_valid_node_count(sbi, dn->inode, !ofs); in f2fs_new_node_folio()
1394 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_new_node_folio()
1395 f2fs_warn_ratelimited(sbi, in f2fs_new_node_folio()
1400 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT); in f2fs_new_node_folio()
1409 set_node_addr(sbi, &new_ni, NEW_ADDR, false); in f2fs_new_node_folio()
1423 inc_valid_inode_count(sbi); in f2fs_new_node_folio()
1438 struct f2fs_sb_info *sbi = F2FS_F_SB(folio); in read_node_folio() local
1441 .sbi = sbi, in read_node_folio()
1451 if (!f2fs_inode_chksum_verify(sbi, folio)) { in read_node_folio()
1458 err = f2fs_get_node_info(sbi, folio->index, &ni, false); in read_node_folio()
1473 f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE); in read_node_folio()
1481 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_ra_node_page() argument
1488 if (f2fs_check_nid_range(sbi, nid)) in f2fs_ra_node_page()
1491 afolio = xa_load(&NODE_MAPPING(sbi)->i_pages, nid); in f2fs_ra_node_page()
1495 afolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false); in f2fs_ra_node_page()
1503 static int sanity_check_node_footer(struct f2fs_sb_info *sbi, in sanity_check_node_footer() argument
1526 if (time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER)) in sanity_check_node_footer()
1530 f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, " in sanity_check_node_footer()
1535 set_sbi_flag(sbi, SBI_NEED_FSCK); in sanity_check_node_footer()
1536 f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER); in sanity_check_node_footer()
1540 static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid, in __get_node_folio() argument
1548 if (f2fs_check_nid_range(sbi, nid)) in __get_node_folio()
1551 folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false); in __get_node_folio()
1576 if (!f2fs_inode_chksum_verify(sbi, folio)) { in __get_node_folio()
1581 err = sanity_check_node_footer(sbi, folio, nid, ntype); in __get_node_folio()
1589 f2fs_handle_page_eio(sbi, folio, NODE); in __get_node_folio()
1594 struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid, in f2fs_get_node_folio() argument
1597 return __get_node_folio(sbi, nid, NULL, 0, node_type); in f2fs_get_node_folio()
1600 struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino) in f2fs_get_inode_folio() argument
1602 return __get_node_folio(sbi, ino, NULL, 0, NODE_TYPE_INODE); in f2fs_get_inode_folio()
1605 struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid) in f2fs_get_xnode_folio() argument
1607 return __get_node_folio(sbi, xnid, NULL, 0, NODE_TYPE_XATTR); in f2fs_get_xnode_folio()
1612 struct f2fs_sb_info *sbi = F2FS_F_SB(parent); in f2fs_get_node_folio_ra() local
1615 return __get_node_folio(sbi, nid, parent, start, NODE_TYPE_REGULAR); in f2fs_get_node_folio_ra()
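
The three public getters above are thin wrappers that differ only in the
footer type __get_node_folio() validates; callers pick the variant matching
what the nid is expected to name. A usage sketch based on the call in
f2fs_truncate_inode_blocks() above:

    struct folio *ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
    if (IS_ERR(ifolio))
            return PTR_ERR(ifolio);         /* error handling assumed */
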
1618 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) in flush_inline_data() argument
1625 inode = ilookup(sbi->sb, ino); in flush_inline_data()
1654 static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) in last_fsync_dnode() argument
1664 while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index, in last_fsync_dnode()
1672 if (unlikely(f2fs_cp_error(sbi))) { in last_fsync_dnode()
1715 struct f2fs_sb_info *sbi = F2FS_F_SB(folio); in __write_node_folio() local
1719 .sbi = sbi, in __write_node_folio()
1734 if (unlikely(f2fs_cp_error(sbi))) { in __write_node_folio()
1736 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY) in __write_node_folio()
1739 dec_page_count(sbi, F2FS_DIRTY_NODES); in __write_node_folio()
1744 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in __write_node_folio()
1747 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) && in __write_node_folio()
1754 f2fs_bug_on(sbi, folio->index != nid); in __write_node_folio()
1756 if (f2fs_get_node_info(sbi, nid, &ni, !do_balance)) in __write_node_folio()
1759 f2fs_down_read(&sbi->node_write); in __write_node_folio()
1764 dec_page_count(sbi, F2FS_DIRTY_NODES); in __write_node_folio()
1765 f2fs_up_read(&sbi->node_write); in __write_node_folio()
1771 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, in __write_node_folio()
1773 f2fs_up_read(&sbi->node_write); in __write_node_folio()
1777 if (atomic && !test_opt(sbi, NOBARRIER)) in __write_node_folio()
1781 if (f2fs_in_warm_node_list(sbi, folio)) { in __write_node_folio()
1782 seq = f2fs_add_fsync_node_entry(sbi, folio); in __write_node_folio()
1791 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(folio)); in __write_node_folio()
1792 dec_page_count(sbi, F2FS_DIRTY_NODES); in __write_node_folio()
1793 f2fs_up_read(&sbi->node_write); in __write_node_folio()
1797 if (unlikely(f2fs_cp_error(sbi))) { in __write_node_folio()
1798 f2fs_submit_merged_write(sbi, NODE); in __write_node_folio()
1805 f2fs_balance_fs(sbi, false); in __write_node_folio()
1849 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, in f2fs_fsync_node_pages() argument
1863 last_folio = last_fsync_dnode(sbi, ino); in f2fs_fsync_node_pages()
1871 while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index, in f2fs_fsync_node_pages()
1880 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_fsync_node_pages()
1914 percpu_counter_inc(&sbi->rf_node_block_count); in f2fs_fsync_node_pages()
1920 f2fs_need_dentry_mark(sbi, ino)); in f2fs_fsync_node_pages()
1953 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx", in f2fs_fsync_node_pages()
1963 f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE); in f2fs_fsync_node_pages()
1969 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_match_ino() local
1978 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_match_ino()
1980 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_match_ino()
1993 struct f2fs_sb_info *sbi = F2FS_F_SB(folio); in flush_dirty_inode() local
1997 inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL); in flush_dirty_inode()
2008 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi) in f2fs_flush_inline_data() argument
2016 while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index, in f2fs_flush_inline_data()
2038 flush_inline_data(sbi, ino_of_node(folio)); in f2fs_flush_inline_data()
2049 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, in f2fs_sync_node_pages() argument
2065 while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), in f2fs_sync_node_pages()
2075 if (atomic_read(&sbi->wb_sync_req[NODE]) && in f2fs_sync_node_pages()
2120 flush_inline_data(sbi, ino_of_node(folio)); in f2fs_sync_node_pages()
2158 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) && in f2fs_sync_node_pages()
2166 f2fs_submit_merged_write(sbi, NODE); in f2fs_sync_node_pages()
2168 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_node_pages()
2173 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, in f2fs_wait_on_node_pages_writeback() argument
2177 struct list_head *head = &sbi->fsync_node_list; in f2fs_wait_on_node_pages_writeback()
2184 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2186 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2191 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2197 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2204 return filemap_check_errors(NODE_MAPPING(sbi)); in f2fs_wait_on_node_pages_writeback()
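
f2fs_wait_on_node_pages_writeback() drains fsync_node_list up to the sequence
id returned by f2fs_add_fsync_node_entry(). A sketch of the loop between the
lock fragments above; the list handling and the wait helper's name are
inferred:

    struct fsync_node_entry *fn;
    struct folio *folio;
    unsigned int cur_seq_id = 0;

    while (seq_id && cur_seq_id < seq_id) {
            spin_lock_irqsave(&sbi->fsync_node_lock, flags);
            if (list_empty(head)) {
                    spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
                    break;
            }
            fn = list_first_entry(head, struct fsync_node_entry, list);
            if (fn->seq_id > seq_id) {
                    spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
                    break;
            }
            cur_seq_id = fn->seq_id;
            folio = fn->folio;
            folio_get(folio);
            spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

            f2fs_folio_wait_writeback(folio, NODE, true, false); /* name assumed */
            folio_put(folio);
    }

    return filemap_check_errors(NODE_MAPPING(sbi));
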
2210 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_node_pages() local
2214 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_write_node_pages()
2218 f2fs_balance_fs_bg(sbi, true); in f2fs_write_node_pages()
2222 get_pages(sbi, F2FS_DIRTY_NODES) < in f2fs_write_node_pages()
2223 nr_pages_to_skip(sbi, NODE)) in f2fs_write_node_pages()
2227 atomic_inc(&sbi->wb_sync_req[NODE]); in f2fs_write_node_pages()
2228 else if (atomic_read(&sbi->wb_sync_req[NODE])) { in f2fs_write_node_pages()
2237 diff = nr_pages_to_write(sbi, NODE, wbc); in f2fs_write_node_pages()
2239 f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO); in f2fs_write_node_pages()
2244 atomic_dec(&sbi->wb_sync_req[NODE]); in f2fs_write_node_pages()
2248 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES); in f2fs_write_node_pages()
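
The wb_sync_req[NODE] counter above is a simple writeback throttle: sync
callers register themselves, and background writeback backs off while any
sync request is in flight. Reconstructed pattern (the plug flush is an
assumption based on the usual f2fs idiom):

    if (wbc->sync_mode == WB_SYNC_ALL)
            atomic_inc(&sbi->wb_sync_req[NODE]);
    else if (atomic_read(&sbi->wb_sync_req[NODE])) {
            /* back off to avoid blocking a pending sync writer */
            if (current->plug)
                    blk_finish_plug(current->plug);     /* assumed */
            goto skip_write;
    }

    /* ... f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO) ... */

    if (wbc->sync_mode == WB_SYNC_ALL)
            atomic_dec(&sbi->wb_sync_req[NODE]);
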
2289 static int __insert_free_nid(struct f2fs_sb_info *sbi, in __insert_free_nid() argument
2292 struct f2fs_nm_info *nm_i = NM_I(sbi); in __insert_free_nid()
2303 static void __remove_free_nid(struct f2fs_sb_info *sbi, in __remove_free_nid() argument
2306 struct f2fs_nm_info *nm_i = NM_I(sbi); in __remove_free_nid()
2308 f2fs_bug_on(sbi, state != i->state); in __remove_free_nid()
2315 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i, in __move_free_nid() argument
2318 struct f2fs_nm_info *nm_i = NM_I(sbi); in __move_free_nid()
2320 f2fs_bug_on(sbi, org_state != i->state); in __move_free_nid()
2337 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, in update_free_nid_bitmap() argument
2340 struct f2fs_nm_info *nm_i = NM_I(sbi); in update_free_nid_bitmap()
2362 static bool add_free_nid(struct f2fs_sb_info *sbi, in add_free_nid() argument
2365 struct f2fs_nm_info *nm_i = NM_I(sbi); in add_free_nid()
2375 if (unlikely(f2fs_check_nid_range(sbi, nid))) in add_free_nid()
2383 f2fs_bug_on(sbi, err); in add_free_nid()
2424 err = __insert_free_nid(sbi, i); in add_free_nid()
2427 update_free_nid_bitmap(sbi, nid, ret, build); in add_free_nid()
2439 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) in remove_free_nid() argument
2441 struct f2fs_nm_info *nm_i = NM_I(sbi); in remove_free_nid()
2448 __remove_free_nid(sbi, i, FREE_NID); in remove_free_nid()
2457 static int scan_nat_page(struct f2fs_sb_info *sbi, in scan_nat_page() argument
2460 struct f2fs_nm_info *nm_i = NM_I(sbi); in scan_nat_page()
2479 add_free_nid(sbi, start_nid, true, true); in scan_nat_page()
2481 spin_lock(&NM_I(sbi)->nid_list_lock); in scan_nat_page()
2482 update_free_nid_bitmap(sbi, start_nid, false, true); in scan_nat_page()
2483 spin_unlock(&NM_I(sbi)->nid_list_lock); in scan_nat_page()
2490 static void scan_curseg_cache(struct f2fs_sb_info *sbi) in scan_curseg_cache() argument
2492 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in scan_curseg_cache()
2504 add_free_nid(sbi, nid, true, false); in scan_curseg_cache()
2506 remove_free_nid(sbi, nid); in scan_curseg_cache()
2511 static void scan_free_nid_bits(struct f2fs_sb_info *sbi) in scan_free_nid_bits() argument
2513 struct f2fs_nm_info *nm_i = NM_I(sbi); in scan_free_nid_bits()
2531 add_free_nid(sbi, nid, true, false); in scan_free_nid_bits()
2538 scan_curseg_cache(sbi); in scan_free_nid_bits()
2543 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, in __f2fs_build_free_nids() argument
2546 struct f2fs_nm_info *nm_i = NM_I(sbi); in __f2fs_build_free_nids()
2560 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS)) in __f2fs_build_free_nids()
2565 scan_free_nid_bits(sbi); in __f2fs_build_free_nids()
2572 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, in __f2fs_build_free_nids()
2580 struct folio *folio = get_current_nat_folio(sbi, nid); in __f2fs_build_free_nids()
2585 ret = scan_nat_page(sbi, folio_address(folio), in __f2fs_build_free_nids()
2594 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); in __f2fs_build_free_nids()
2595 set_sbi_flag(sbi, SBI_NEED_FSCK); in __f2fs_build_free_nids()
2596 f2fs_handle_error(sbi, in __f2fs_build_free_nids()
2616 scan_curseg_cache(sbi); in __f2fs_build_free_nids()
2620 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), in __f2fs_build_free_nids()
2626 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) in f2fs_build_free_nids() argument
2630 mutex_lock(&NM_I(sbi)->build_lock); in f2fs_build_free_nids()
2631 ret = __f2fs_build_free_nids(sbi, sync, mount); in f2fs_build_free_nids()
2632 mutex_unlock(&NM_I(sbi)->build_lock); in f2fs_build_free_nids()
2642 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) in f2fs_alloc_nid() argument
2644 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_alloc_nid()
2647 if (time_to_inject(sbi, FAULT_ALLOC_NID)) in f2fs_alloc_nid()
2659 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); in f2fs_alloc_nid()
2663 if (unlikely(is_invalid_nid(sbi, i->nid))) { in f2fs_alloc_nid()
2665 f2fs_err(sbi, "Corrupted nid %u in free_nid_list", in f2fs_alloc_nid()
2667 f2fs_stop_checkpoint(sbi, false, in f2fs_alloc_nid()
2674 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID); in f2fs_alloc_nid()
2677 update_free_nid_bitmap(sbi, *nid, false, false); in f2fs_alloc_nid()
2685 if (!f2fs_build_free_nids(sbi, true, false)) in f2fs_alloc_nid()
2693 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_alloc_nid_done() argument
2695 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_alloc_nid_done()
2700 f2fs_bug_on(sbi, !i); in f2fs_alloc_nid_done()
2701 __remove_free_nid(sbi, i, PREALLOC_NID); in f2fs_alloc_nid_done()
2710 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_alloc_nid_failed() argument
2712 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_alloc_nid_failed()
2721 f2fs_bug_on(sbi, !i); in f2fs_alloc_nid_failed()
2723 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) { in f2fs_alloc_nid_failed()
2724 __remove_free_nid(sbi, i, PREALLOC_NID); in f2fs_alloc_nid_failed()
2727 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID); in f2fs_alloc_nid_failed()
2732 update_free_nid_bitmap(sbi, nid, true, false); in f2fs_alloc_nid_failed()
2740 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) in f2fs_try_to_free_nids() argument
2742 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_try_to_free_nids()
2760 __remove_free_nid(sbi, i, FREE_NID); in f2fs_try_to_free_nids()
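
f2fs_try_to_free_nids() is the shrinker side of the free-nid cache: under
nid_list_lock it pops FREE_NID entries until nr_shrink is satisfied. A sketch
with inferred loop bounds (the reserve check and the slab name are
assumptions):

    spin_lock(&nm_i->nid_list_lock);
    list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
            if (!nr_shrink || nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
                    break;                          /* reserve assumed */
            __remove_free_nid(sbi, i, FREE_NID);
            kmem_cache_free(free_nid_slab, i);      /* slab name assumed */
            nr_shrink--;
    }
    spin_unlock(&nm_i->nid_list_lock);
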
2812 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_recover_xattr_data() local
2824 err = f2fs_get_node_info(sbi, prev_xnid, &ni, false); in f2fs_recover_xattr_data()
2828 f2fs_invalidate_blocks(sbi, ni.blk_addr, 1); in f2fs_recover_xattr_data()
2829 dec_valid_node_count(sbi, inode, false); in f2fs_recover_xattr_data()
2830 set_node_addr(sbi, &ni, NULL_ADDR, false); in f2fs_recover_xattr_data()
2834 if (!f2fs_alloc_nid(sbi, &new_xnid)) in f2fs_recover_xattr_data()
2840 f2fs_alloc_nid_failed(sbi, new_xnid); in f2fs_recover_xattr_data()
2844 f2fs_alloc_nid_done(sbi, new_xnid); in f2fs_recover_xattr_data()
2858 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio) in f2fs_recover_inode_page() argument
2866 err = f2fs_get_node_info(sbi, ino, &old_ni, false); in f2fs_recover_inode_page()
2873 ifolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), ino, false); in f2fs_recover_inode_page()
2880 remove_free_nid(sbi, ino); in f2fs_recover_inode_page()
2899 if (f2fs_sb_has_flexible_inline_xattr(sbi) && in f2fs_recover_inode_page()
2904 if (f2fs_sb_has_project_quota(sbi) && in f2fs_recover_inode_page()
2909 if (f2fs_sb_has_inode_crtime(sbi) && in f2fs_recover_inode_page()
2920 if (unlikely(inc_valid_node_count(sbi, NULL, true))) in f2fs_recover_inode_page()
2922 set_node_addr(sbi, &new_ni, NEW_ADDR, false); in f2fs_recover_inode_page()
2923 inc_valid_inode_count(sbi); in f2fs_recover_inode_page()
2929 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, in f2fs_restore_node_summary() argument
2938 last_offset = BLKS_PER_SEG(sbi); in f2fs_restore_node_summary()
2939 addr = START_BLOCK(sbi, segno); in f2fs_restore_node_summary()
2946 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true); in f2fs_restore_node_summary()
2949 struct folio *folio = f2fs_get_tmp_folio(sbi, idx); in f2fs_restore_node_summary()
2962 invalidate_mapping_pages(META_MAPPING(sbi), addr, in f2fs_restore_node_summary()
2968 static void remove_nats_in_journal(struct f2fs_sb_info *sbi) in remove_nats_in_journal() argument
2970 struct f2fs_nm_info *nm_i = NM_I(sbi); in remove_nats_in_journal()
2971 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in remove_nats_in_journal()
2982 if (f2fs_check_nid_range(sbi, nid)) in remove_nats_in_journal()
2992 ne = __alloc_nat_entry(sbi, nid, true); in remove_nats_in_journal()
3032 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, in __update_nat_bits() argument
3035 struct f2fs_nm_info *nm_i = NM_I(sbi); in __update_nat_bits()
3040 if (!enabled_nat_bits(sbi, NULL)) in __update_nat_bits()
3064 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, in __flush_nat_entry_set() argument
3067 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in __flush_nat_entry_set()
3080 if (enabled_nat_bits(sbi, cpc) || in __flush_nat_entry_set()
3087 folio = get_next_nat_folio(sbi, start_nid); in __flush_nat_entry_set()
3092 f2fs_bug_on(sbi, !nat_blk); in __flush_nat_entry_set()
3101 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR); in __flush_nat_entry_set()
3106 f2fs_bug_on(sbi, offset < 0); in __flush_nat_entry_set()
3114 __clear_nat_cache_dirty(NM_I(sbi), set, ne); in __flush_nat_entry_set()
3116 add_free_nid(sbi, nid, false, true); in __flush_nat_entry_set()
3118 spin_lock(&NM_I(sbi)->nid_list_lock); in __flush_nat_entry_set()
3119 update_free_nid_bitmap(sbi, nid, false, false); in __flush_nat_entry_set()
3120 spin_unlock(&NM_I(sbi)->nid_list_lock); in __flush_nat_entry_set()
3127 __update_nat_bits(sbi, start_nid, nat_blk); in __flush_nat_entry_set()
3133 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); in __flush_nat_entry_set()
3142 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) in f2fs_flush_nat_entries() argument
3144 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_flush_nat_entries()
3145 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in f2fs_flush_nat_entries()
3158 if (enabled_nat_bits(sbi, cpc)) { in f2fs_flush_nat_entries()
3160 remove_nats_in_journal(sbi); in f2fs_flush_nat_entries()
3174 if (enabled_nat_bits(sbi, cpc) || in f2fs_flush_nat_entries()
3177 remove_nats_in_journal(sbi); in f2fs_flush_nat_entries()
3191 err = __flush_nat_entry_set(sbi, set, cpc); in f2fs_flush_nat_entries()
3202 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) in __get_nat_bitmaps() argument
3204 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in __get_nat_bitmaps()
3205 struct f2fs_nm_info *nm_i = NM_I(sbi); in __get_nat_bitmaps()
3211 if (!enabled_nat_bits(sbi, NULL)) in __get_nat_bitmaps()
3215 nm_i->nat_bits = f2fs_kvzalloc(sbi, in __get_nat_bitmaps()
3220 nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) - in __get_nat_bitmaps()
3225 folio = f2fs_get_meta_folio(sbi, nat_bits_addr++); in __get_nat_bitmaps()
3236 disable_nat_bits(sbi, true); in __get_nat_bitmaps()
3243 f2fs_notice(sbi, "Found nat_bits in checkpoint"); in __get_nat_bitmaps()
3247 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) in load_free_nid_bitmap() argument
3249 struct f2fs_nm_info *nm_i = NM_I(sbi); in load_free_nid_bitmap()
3253 if (!enabled_nat_bits(sbi, NULL)) in load_free_nid_bitmap()
3266 spin_lock(&NM_I(sbi)->nid_list_lock); in load_free_nid_bitmap()
3268 update_free_nid_bitmap(sbi, nid, true, true); in load_free_nid_bitmap()
3269 spin_unlock(&NM_I(sbi)->nid_list_lock); in load_free_nid_bitmap()
3281 static int init_node_manager(struct f2fs_sb_info *sbi) in init_node_manager() argument
3283 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); in init_node_manager()
3284 struct f2fs_nm_info *nm_i = NM_I(sbi); in init_node_manager()
3297 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - in init_node_manager()
3317 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); in init_node_manager()
3318 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); in init_node_manager()
3319 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); in init_node_manager()
3325 if (!test_opt(sbi, NAT_BITS)) in init_node_manager()
3326 disable_nat_bits(sbi, true); in init_node_manager()
3328 err = __get_nat_bitmaps(sbi); in init_node_manager()
3342 static int init_free_nid_cache(struct f2fs_sb_info *sbi) in init_free_nid_cache() argument
3344 struct f2fs_nm_info *nm_i = NM_I(sbi); in init_free_nid_cache()
3348 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), in init_free_nid_cache()
3355 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi, in init_free_nid_cache()
3361 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8, in init_free_nid_cache()
3367 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short), in init_free_nid_cache()
3375 int f2fs_build_node_manager(struct f2fs_sb_info *sbi) in f2fs_build_node_manager() argument
3379 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info), in f2fs_build_node_manager()
3381 if (!sbi->nm_info) in f2fs_build_node_manager()
3384 err = init_node_manager(sbi); in f2fs_build_node_manager()
3388 err = init_free_nid_cache(sbi); in f2fs_build_node_manager()
3393 load_free_nid_bitmap(sbi); in f2fs_build_node_manager()
3395 return f2fs_build_free_nids(sbi, true, true); in f2fs_build_node_manager()
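
Taken together, the fragments give the mount-time bring-up of the node
manager almost verbatim; only the GFP flag and the error-return plumbing are
filled in here:

    int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
    {
            int err;

            sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
                                        GFP_KERNEL);        /* flag assumed */
            if (!sbi->nm_info)
                    return -ENOMEM;

            err = init_node_manager(sbi);   /* sizes, NAT bitmap, locks */
            if (err)
                    return err;

            err = init_free_nid_cache(sbi); /* free-nid / NAT-block bitmaps */
            if (err)
                    return err;

            load_free_nid_bitmap(sbi);      /* seed from packed nat_bits */

            return f2fs_build_free_nids(sbi, true, true);
    }
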
3398 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) in f2fs_destroy_node_manager() argument
3400 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_destroy_node_manager()
3414 __remove_free_nid(sbi, i, FREE_NID); in f2fs_destroy_node_manager()
3419 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); in f2fs_destroy_node_manager()
3420 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); in f2fs_destroy_node_manager()
3421 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); in f2fs_destroy_node_manager()
3439 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); in f2fs_destroy_node_manager()
3451 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); in f2fs_destroy_node_manager()
3473 sbi->nm_info = NULL; in f2fs_destroy_node_manager()