Lines matching full:sbi (all references to the identifier sbi in fs/f2fs/checkpoint.c)
Each entry gives the source line number, the matched line, and the enclosing function; the trailing "argument" or "local" tag marks the line where sbi is declared as a function parameter or a local variable, while untagged entries are plain uses.

29 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,  in f2fs_stop_checkpoint()  argument
32 f2fs_build_fault_attr(sbi, 0, 0); in f2fs_stop_checkpoint()
34 f2fs_flush_merged_writes(sbi); in f2fs_stop_checkpoint()
35 f2fs_handle_critical_error(sbi, reason, end_io); in f2fs_stop_checkpoint()
41 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_grab_meta_page() argument
43 struct address_space *mapping = META_MAPPING(sbi); in f2fs_grab_meta_page()
57 static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, in __get_meta_page() argument
60 struct address_space *mapping = META_MAPPING(sbi); in __get_meta_page()
63 .sbi = sbi, in __get_meta_page()
93 f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE); in __get_meta_page()
102 f2fs_handle_page_eio(sbi, page_folio(page), META); in __get_meta_page()
110 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_meta_page() argument
112 return __get_meta_page(sbi, index, true); in f2fs_get_meta_page()
115 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_meta_page_retry() argument
121 page = __get_meta_page(sbi, index, true); in f2fs_get_meta_page_retry()
126 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE); in f2fs_get_meta_page_retry()
132 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_tmp_page() argument
134 return __get_meta_page(sbi, index, false); in f2fs_get_tmp_page()
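
All of the meta-page getters matched above funnel into __get_meta_page(); a condensed sketch of how they relate, reconstructed from the matched lines (the retry bound DEFAULT_RETRY_IO_COUNT, the IS_ERR/PTR_ERR handling and the loop structure are taken from the upstream source, not from this listing, and should be read as assumptions):

struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        /* normal meta read; an error pointer is returned to the caller */
        return __get_meta_page(sbi, index, true);
}

struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        /* same lookup, but treated as a power-off-recovery (POR) read */
        return __get_meta_page(sbi, index, false);
}

struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct page *page;
        int count = 0;

retry:
        page = __get_meta_page(sbi, index, true);
        if (IS_ERR(page)) {
                /* retry transient -EIO a bounded number of times ... */
                if (PTR_ERR(page) == -EIO && ++count <= DEFAULT_RETRY_IO_COUNT)
                        goto retry;
                /* ... then give up and stop further checkpointing */
                f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE);
        }
        return page;
}
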
137 static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, in __is_bitmap_valid() argument
147 segno = GET_SEGNO(sbi, blkaddr); in __is_bitmap_valid()
148 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); in __is_bitmap_valid()
149 se = get_seg_entry(sbi, segno); in __is_bitmap_valid()
154 if (unlikely(f2fs_cp_error(sbi))) in __is_bitmap_valid()
165 f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", in __is_bitmap_valid()
167 set_sbi_flag(sbi, SBI_NEED_FSCK); in __is_bitmap_valid()
170 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); in __is_bitmap_valid()
174 static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, in __f2fs_is_valid_blkaddr() argument
181 if (unlikely(blkaddr >= SIT_BLK_CNT(sbi))) in __f2fs_is_valid_blkaddr()
185 if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) || in __f2fs_is_valid_blkaddr()
186 blkaddr < SM_I(sbi)->ssa_blkaddr)) in __f2fs_is_valid_blkaddr()
190 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr || in __f2fs_is_valid_blkaddr()
191 blkaddr < __start_cp_addr(sbi))) in __f2fs_is_valid_blkaddr()
195 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || in __f2fs_is_valid_blkaddr()
196 blkaddr < MAIN_BLKADDR(sbi))) in __f2fs_is_valid_blkaddr()
203 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || in __f2fs_is_valid_blkaddr()
204 blkaddr < MAIN_BLKADDR(sbi))) { in __f2fs_is_valid_blkaddr()
207 if (unlikely(f2fs_cp_error(sbi))) in __f2fs_is_valid_blkaddr()
210 f2fs_warn(sbi, "access invalid blkaddr:%u", in __f2fs_is_valid_blkaddr()
212 set_sbi_flag(sbi, SBI_NEED_FSCK); in __f2fs_is_valid_blkaddr()
216 return __is_bitmap_valid(sbi, blkaddr, type); in __f2fs_is_valid_blkaddr()
220 if (unlikely(blkaddr < SEG0_BLKADDR(sbi) || in __f2fs_is_valid_blkaddr()
221 blkaddr >= MAIN_BLKADDR(sbi))) in __f2fs_is_valid_blkaddr()
230 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); in __f2fs_is_valid_blkaddr()
235 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, in f2fs_is_valid_blkaddr() argument
238 if (time_to_inject(sbi, FAULT_BLKADDR_VALIDITY)) in f2fs_is_valid_blkaddr()
240 return __f2fs_is_valid_blkaddr(sbi, blkaddr, type); in f2fs_is_valid_blkaddr()
243 bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi, in f2fs_is_valid_blkaddr_raw() argument
246 return __f2fs_is_valid_blkaddr(sbi, blkaddr, type); in f2fs_is_valid_blkaddr_raw()
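
The three validity helpers matched above are layered; a minimal sketch of the two public wrappers (parameter names beyond sbi are filled in from the upstream signature, and the per-type checks of __f2fs_is_valid_blkaddr() are summarized in the comment rather than reproduced):

/* __f2fs_is_valid_blkaddr() range-checks blkaddr against the on-disk area
 * implied by type (SIT, SSA, CP, main/POR area) and, for the DATA_GENERIC*
 * types, additionally consults the SIT bitmap via __is_bitmap_valid();
 * an invalid address raises ERROR_INVALID_BLKADDR and may set SBI_NEED_FSCK. */

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
        /* fault injection can force a "not valid" answer for testing */
        if (time_to_inject(sbi, FAULT_BLKADDR_VALIDITY))
                return false;
        return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
}

bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
        /* same check, never perturbed by fault injection */
        return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
}
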
252 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, in f2fs_ra_meta_pages() argument
258 .sbi = sbi, in f2fs_ra_meta_pages()
275 if (!f2fs_is_valid_blkaddr(sbi, blkno, type)) in f2fs_ra_meta_pages()
281 NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))) in f2fs_ra_meta_pages()
284 fio.new_blkaddr = current_nat_addr(sbi, in f2fs_ra_meta_pages()
288 if (unlikely(blkno >= TOTAL_SEGS(sbi))) in f2fs_ra_meta_pages()
291 fio.new_blkaddr = current_sit_addr(sbi, in f2fs_ra_meta_pages()
303 page = f2fs_grab_cache_page(META_MAPPING(sbi), in f2fs_ra_meta_pages()
317 f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, in f2fs_ra_meta_pages()
325 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, in f2fs_ra_meta_pages_cond() argument
334 page = find_get_page(META_MAPPING(sbi), index); in f2fs_ra_meta_pages_cond()
340 f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true); in f2fs_ra_meta_pages_cond()
347 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in __f2fs_write_meta_page() local
352 if (unlikely(f2fs_cp_error(sbi))) { in __f2fs_write_meta_page()
353 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) { in __f2fs_write_meta_page()
355 dec_page_count(sbi, F2FS_DIRTY_META); in __f2fs_write_meta_page()
361 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in __f2fs_write_meta_page()
363 if (wbc->for_reclaim && folio->index < GET_SUM_BLOCK(sbi, 0)) in __f2fs_write_meta_page()
366 f2fs_do_write_meta_page(sbi, folio, io_type); in __f2fs_write_meta_page()
367 dec_page_count(sbi, F2FS_DIRTY_META); in __f2fs_write_meta_page()
370 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META); in __f2fs_write_meta_page()
374 if (unlikely(f2fs_cp_error(sbi))) in __f2fs_write_meta_page()
375 f2fs_submit_merged_write(sbi, META); in __f2fs_write_meta_page()
393 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_meta_pages() local
396 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_write_meta_pages()
401 get_pages(sbi, F2FS_DIRTY_META) < in f2fs_write_meta_pages()
402 nr_pages_to_skip(sbi, META)) in f2fs_write_meta_pages()
406 if (!f2fs_down_write_trylock(&sbi->cp_global_sem)) in f2fs_write_meta_pages()
410 diff = nr_pages_to_write(sbi, META, wbc); in f2fs_write_meta_pages()
411 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); in f2fs_write_meta_pages()
412 f2fs_up_write(&sbi->cp_global_sem); in f2fs_write_meta_pages()
417 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META); in f2fs_write_meta_pages()
422 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, in f2fs_sync_meta_pages() argument
425 struct address_space *mapping = META_MAPPING(sbi); in f2fs_sync_meta_pages()
487 f2fs_submit_merged_write(sbi, type); in f2fs_sync_meta_pages()
518 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, in __add_ino_entry() argument
521 struct inode_management *im = &sbi->im[type]; in __add_ino_entry()
547 f2fs_bug_on(sbi, 1); in __add_ino_entry()
567 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in __remove_ino_entry() argument
569 struct inode_management *im = &sbi->im[type]; in __remove_ino_entry()
585 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in f2fs_add_ino_entry() argument
588 __add_ino_entry(sbi, ino, 0, type); in f2fs_add_ino_entry()
591 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in f2fs_remove_ino_entry() argument
594 __remove_ino_entry(sbi, ino, type); in f2fs_remove_ino_entry()
598 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode) in f2fs_exist_written_data() argument
600 struct inode_management *im = &sbi->im[mode]; in f2fs_exist_written_data()
609 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all) in f2fs_release_ino_entry() argument
615 struct inode_management *im = &sbi->im[i]; in f2fs_release_ino_entry()
628 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, in f2fs_set_dirty_device() argument
631 __add_ino_entry(sbi, ino, devidx, type); in f2fs_set_dirty_device()
634 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, in f2fs_is_dirty_device() argument
637 struct inode_management *im = &sbi->im[type]; in f2fs_is_dirty_device()
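
The __add_ino_entry()/__remove_ino_entry() pair and their wrappers maintain per-type inode sets in sbi->im[]; a hypothetical caller, for illustration only (the function name and the choice of APPEND_INO are assumed examples, not taken from this listing):

/* hypothetical illustration of the ino-entry API */
static void example_track_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        /* record the inode in the APPEND_INO set */
        f2fs_add_ino_entry(sbi, ino, APPEND_INO);

        /* query: is the inode still recorded in that set? */
        if (f2fs_exist_written_data(sbi, ino, APPEND_INO))
                ;       /* e.g. decide whether an fsync still needs a checkpoint */

        /* drop the record explicitly; a checkpoint drops whole sets
         * via f2fs_release_ino_entry() */
        f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
}
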
649 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi) in f2fs_acquire_orphan_inode() argument
651 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_acquire_orphan_inode()
656 if (time_to_inject(sbi, FAULT_ORPHAN)) { in f2fs_acquire_orphan_inode()
661 if (unlikely(im->ino_num >= sbi->max_orphans)) in f2fs_acquire_orphan_inode()
670 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi) in f2fs_release_orphan_inode() argument
672 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_release_orphan_inode()
675 f2fs_bug_on(sbi, im->ino_num == 0); in f2fs_release_orphan_inode()
687 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) in f2fs_remove_orphan_inode() argument
690 __remove_ino_entry(sbi, ino, ORPHAN_INO); in f2fs_remove_orphan_inode()
693 static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) in recover_orphan_inode() argument
699 inode = f2fs_iget_retry(sbi->sb, ino); in recover_orphan_inode()
705 f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT); in recover_orphan_inode()
720 err = f2fs_get_node_info(sbi, ino, &ni, false); in recover_orphan_inode()
732 set_sbi_flag(sbi, SBI_NEED_FSCK); in recover_orphan_inode()
733 f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.", in recover_orphan_inode()
738 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi) in f2fs_recover_orphan_inodes() argument
743 if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) in f2fs_recover_orphan_inodes()
746 if (f2fs_hw_is_readonly(sbi)) { in f2fs_recover_orphan_inodes()
747 f2fs_info(sbi, "write access unavailable, skipping orphan cleanup"); in f2fs_recover_orphan_inodes()
751 if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) in f2fs_recover_orphan_inodes()
752 f2fs_info(sbi, "orphan cleanup on readonly fs"); in f2fs_recover_orphan_inodes()
754 start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi); in f2fs_recover_orphan_inodes()
755 orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi); in f2fs_recover_orphan_inodes()
757 f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true); in f2fs_recover_orphan_inodes()
763 page = f2fs_get_meta_page(sbi, start_blk + i); in f2fs_recover_orphan_inodes()
773 err = recover_orphan_inode(sbi, ino); in f2fs_recover_orphan_inodes()
782 clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG); in f2fs_recover_orphan_inodes()
784 set_sbi_flag(sbi, SBI_IS_RECOVERED); in f2fs_recover_orphan_inodes()
789 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) in write_orphan_inodes() argument
798 struct inode_management *im = &sbi->im[ORPHAN_INO]; in write_orphan_inodes()
812 page = f2fs_grab_meta_page(sbi, start_blk++); in write_orphan_inodes()
846 static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi, in f2fs_checkpoint_chksum() argument
852 chksum = f2fs_crc32(sbi, ckpt, chksum_ofs); in f2fs_checkpoint_chksum()
855 chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs, in f2fs_checkpoint_chksum()
861 static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr, in get_checkpoint_version() argument
868 *cp_page = f2fs_get_meta_page(sbi, cp_addr); in get_checkpoint_version()
878 f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset); in get_checkpoint_version()
882 crc = f2fs_checkpoint_chksum(sbi, *cp_block); in get_checkpoint_version()
885 f2fs_warn(sbi, "invalid crc value"); in get_checkpoint_version()
893 static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, in validate_checkpoint() argument
902 err = get_checkpoint_version(sbi, cp_addr, &cp_block, in validate_checkpoint()
909 if (cp_blocks > BLKS_PER_SEG(sbi) || cp_blocks <= F2FS_CP_PACKS) { in validate_checkpoint()
910 f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u", in validate_checkpoint()
917 err = get_checkpoint_version(sbi, cp_addr, &cp_block, in validate_checkpoint()
934 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) in f2fs_get_valid_checkpoint() argument
937 struct f2fs_super_block *fsb = sbi->raw_super; in f2fs_get_valid_checkpoint()
939 unsigned long blk_size = sbi->blocksize; in f2fs_get_valid_checkpoint()
942 unsigned int cp_blks = 1 + __cp_payload(sbi); in f2fs_get_valid_checkpoint()
947 sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), in f2fs_get_valid_checkpoint()
949 if (!sbi->ckpt) in f2fs_get_valid_checkpoint()
956 cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); in f2fs_get_valid_checkpoint()
961 cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version); in f2fs_get_valid_checkpoint()
978 memcpy(sbi->ckpt, cp_block, blk_size); in f2fs_get_valid_checkpoint()
981 sbi->cur_cp_pack = 1; in f2fs_get_valid_checkpoint()
983 sbi->cur_cp_pack = 2; in f2fs_get_valid_checkpoint()
986 if (f2fs_sanity_check_ckpt(sbi)) { in f2fs_get_valid_checkpoint()
1000 unsigned char *ckpt = (unsigned char *)sbi->ckpt; in f2fs_get_valid_checkpoint()
1002 cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i); in f2fs_get_valid_checkpoint()
1020 kvfree(sbi->ckpt); in f2fs_get_valid_checkpoint()
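
f2fs_get_valid_checkpoint() probes both checkpoint packs and mounts from the newer valid one; a condensed sketch of that selection, reconstructed from the matched lines (fsb->cp_blkaddr, log_blocks_per_seg, ver_after(), page_address() and the fail_no_cp label come from the upstream source and are assumptions here):

        cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

        /* the second pack starts one segment after the first */
        cp_start_blk_no += 1ULL << le32_to_cpu(fsb->log_blocks_per_seg);
        cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

        if (cp1 && cp2)
                cur_page = ver_after(cp2_version, cp1_version) ? cp2 : cp1;
        else if (cp1 || cp2)
                cur_page = cp1 ? cp1 : cp2;
        else
                goto fail_no_cp;        /* neither pack is usable (cleanup not shown) */

        cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
        memcpy(sbi->ckpt, cp_block, blk_size);

        sbi->cur_cp_pack = (cur_page == cp1) ? 1 : 2;
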
1026 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __add_dirty_inode() local
1033 list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]); in __add_dirty_inode()
1034 stat_inc_dirty_inode(sbi, type); in __add_dirty_inode()
1051 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_update_dirty_folio() local
1058 spin_lock(&sbi->inode_lock[type]); in f2fs_update_dirty_folio()
1059 if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) in f2fs_update_dirty_folio()
1062 spin_unlock(&sbi->inode_lock[type]); in f2fs_update_dirty_folio()
1069 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_remove_dirty_inode() local
1076 if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH)) in f2fs_remove_dirty_inode()
1079 spin_lock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
1081 spin_unlock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
1084 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, in f2fs_sync_dirty_inodes() argument
1093 trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1094 get_pages(sbi, is_dir ? in f2fs_sync_dirty_inodes()
1097 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_sync_dirty_inodes()
1098 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1099 get_pages(sbi, is_dir ? in f2fs_sync_dirty_inodes()
1104 spin_lock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1106 head = &sbi->inode_list[type]; in f2fs_sync_dirty_inodes()
1108 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1109 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1110 get_pages(sbi, is_dir ? in f2fs_sync_dirty_inodes()
1116 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1141 f2fs_submit_merged_write(sbi, DATA); in f2fs_sync_dirty_inodes()
1147 static int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi) in f2fs_sync_inode_meta() argument
1149 struct list_head *head = &sbi->inode_list[DIRTY_META]; in f2fs_sync_inode_meta()
1152 s64 total = get_pages(sbi, F2FS_DIRTY_IMETA); in f2fs_sync_inode_meta()
1155 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_inode_meta()
1158 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1160 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1166 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1179 static void __prepare_cp_block(struct f2fs_sb_info *sbi) in __prepare_cp_block() argument
1181 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in __prepare_cp_block()
1182 struct f2fs_nm_info *nm_i = NM_I(sbi); in __prepare_cp_block()
1185 next_free_nid(sbi, &last_nid); in __prepare_cp_block()
1186 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); in __prepare_cp_block()
1187 ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); in __prepare_cp_block()
1188 ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); in __prepare_cp_block()
1192 sbi->last_valid_block_count = sbi->total_valid_block_count; in __prepare_cp_block()
1193 percpu_counter_set(&sbi->alloc_valid_block_count, 0); in __prepare_cp_block()
1194 percpu_counter_set(&sbi->rf_node_block_count, 0); in __prepare_cp_block()
1197 static bool __need_flush_quota(struct f2fs_sb_info *sbi) in __need_flush_quota() argument
1201 if (!is_journalled_quota(sbi)) in __need_flush_quota()
1204 if (!f2fs_down_write_trylock(&sbi->quota_sem)) in __need_flush_quota()
1206 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) { in __need_flush_quota()
1208 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) { in __need_flush_quota()
1210 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) { in __need_flush_quota()
1211 clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); in __need_flush_quota()
1213 } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) { in __need_flush_quota()
1216 f2fs_up_write(&sbi->quota_sem); in __need_flush_quota()
1223 static int block_operations(struct f2fs_sb_info *sbi) in block_operations() argument
1235 f2fs_flush_inline_data(sbi); in block_operations()
1238 f2fs_lock_all(sbi); in block_operations()
1239 if (__need_flush_quota(sbi)) { in block_operations()
1243 set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); in block_operations()
1244 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); in block_operations()
1247 f2fs_unlock_all(sbi); in block_operations()
1250 locked = down_read_trylock(&sbi->sb->s_umount); in block_operations()
1251 f2fs_quota_sync(sbi->sb, -1); in block_operations()
1253 up_read(&sbi->sb->s_umount); in block_operations()
1260 if (get_pages(sbi, F2FS_DIRTY_DENTS)) { in block_operations()
1261 f2fs_unlock_all(sbi); in block_operations()
1262 err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true); in block_operations()
1273 f2fs_down_write(&sbi->node_change); in block_operations()
1275 if (get_pages(sbi, F2FS_DIRTY_IMETA)) { in block_operations()
1276 f2fs_up_write(&sbi->node_change); in block_operations()
1277 f2fs_unlock_all(sbi); in block_operations()
1278 err = f2fs_sync_inode_meta(sbi); in block_operations()
1286 f2fs_down_write(&sbi->node_write); in block_operations()
1288 if (get_pages(sbi, F2FS_DIRTY_NODES)) { in block_operations()
1289 f2fs_up_write(&sbi->node_write); in block_operations()
1290 atomic_inc(&sbi->wb_sync_req[NODE]); in block_operations()
1291 err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO); in block_operations()
1292 atomic_dec(&sbi->wb_sync_req[NODE]); in block_operations()
1294 f2fs_up_write(&sbi->node_change); in block_operations()
1295 f2fs_unlock_all(sbi); in block_operations()
1303 * sbi->node_change is used only for AIO write_begin path which produces in block_operations()
1306 __prepare_cp_block(sbi); in block_operations()
1307 f2fs_up_write(&sbi->node_change); in block_operations()
1311 static void unblock_operations(struct f2fs_sb_info *sbi) in unblock_operations() argument
1313 f2fs_up_write(&sbi->node_write); in unblock_operations()
1314 f2fs_unlock_all(sbi); in unblock_operations()
1317 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) in f2fs_wait_on_all_pages() argument
1322 if (!get_pages(sbi, type)) in f2fs_wait_on_all_pages()
1325 if (unlikely(f2fs_cp_error(sbi) && in f2fs_wait_on_all_pages()
1326 !is_sbi_flag_set(sbi, SBI_IS_CLOSE))) in f2fs_wait_on_all_pages()
1330 f2fs_sync_meta_pages(sbi, META, LONG_MAX, in f2fs_wait_on_all_pages()
1333 f2fs_submit_merged_write(sbi, DATA); in f2fs_wait_on_all_pages()
1335 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); in f2fs_wait_on_all_pages()
1338 finish_wait(&sbi->cp_wait, &wait); in f2fs_wait_on_all_pages()
1341 static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) in update_ckpt_flags() argument
1343 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; in update_ckpt_flags()
1344 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in update_ckpt_flags()
1349 NM_I(sbi)->nat_bits_blocks > BLKS_PER_SEG(sbi)) { in update_ckpt_flags()
1350 clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG); in update_ckpt_flags()
1351 f2fs_notice(sbi, "Disable nat_bits due to no space"); in update_ckpt_flags()
1352 } else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) && in update_ckpt_flags()
1353 f2fs_nat_bitmap_enabled(sbi)) { in update_ckpt_flags()
1354 f2fs_enable_nat_bits(sbi); in update_ckpt_flags()
1355 set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); in update_ckpt_flags()
1356 f2fs_notice(sbi, "Rebuild and enable nat_bits"); in update_ckpt_flags()
1360 spin_lock_irqsave(&sbi->cp_lock, flags); in update_ckpt_flags()
1382 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) in update_ckpt_flags()
1385 if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) in update_ckpt_flags()
1390 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) in update_ckpt_flags()
1395 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK)) in update_ckpt_flags()
1400 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) in update_ckpt_flags()
1405 if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) in update_ckpt_flags()
1412 spin_unlock_irqrestore(&sbi->cp_lock, flags); in update_ckpt_flags()
1415 static void commit_checkpoint(struct f2fs_sb_info *sbi, in commit_checkpoint() argument
1427 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); in commit_checkpoint()
1436 f2fs_bug_on(sbi, 1); in commit_checkpoint()
1440 if (unlikely(err && f2fs_cp_error(sbi))) { in commit_checkpoint()
1445 f2fs_bug_on(sbi, err); in commit_checkpoint()
1449 f2fs_submit_merged_write(sbi, META_FLUSH); in commit_checkpoint()
1457 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi) in f2fs_get_sectors_written() argument
1459 if (f2fs_is_multi_device(sbi)) { in f2fs_get_sectors_written()
1463 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_get_sectors_written()
1469 return get_sectors_written(sbi->sb->s_bdev); in f2fs_get_sectors_written()
1472 static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) in do_checkpoint() argument
1474 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in do_checkpoint()
1475 struct f2fs_nm_info *nm_i = NM_I(sbi); in do_checkpoint()
1476 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags; in do_checkpoint()
1481 int cp_payload_blks = __cp_payload(sbi); in do_checkpoint()
1482 struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); in do_checkpoint()
1487 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); in do_checkpoint()
1490 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true)); in do_checkpoint()
1491 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); in do_checkpoint()
1493 struct curseg_info *curseg = CURSEG_I(sbi, i + CURSEG_HOT_NODE); in do_checkpoint()
1500 struct curseg_info *curseg = CURSEG_I(sbi, i + CURSEG_HOT_DATA); in do_checkpoint()
1508 data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false); in do_checkpoint()
1509 spin_lock_irqsave(&sbi->cp_lock, flags); in do_checkpoint()
1514 spin_unlock_irqrestore(&sbi->cp_lock, flags); in do_checkpoint()
1530 update_ckpt_flags(sbi, cpc); in do_checkpoint()
1533 get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); in do_checkpoint()
1534 get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); in do_checkpoint()
1536 crc32 = f2fs_checkpoint_chksum(sbi, ckpt); in do_checkpoint()
1541 start_blk = __start_cp_next_addr(sbi); in do_checkpoint()
1545 is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) { in do_checkpoint()
1552 blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks; in do_checkpoint()
1554 f2fs_update_meta_page(sbi, nm_i->nat_bits + in do_checkpoint()
1559 f2fs_update_meta_page(sbi, ckpt, start_blk++); in do_checkpoint()
1562 f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE, in do_checkpoint()
1566 write_orphan_inodes(sbi, start_blk); in do_checkpoint()
1570 f2fs_write_data_summaries(sbi, start_blk); in do_checkpoint()
1574 kbytes_written = sbi->kbytes_written; in do_checkpoint()
1575 kbytes_written += (f2fs_get_sectors_written(sbi) - in do_checkpoint()
1576 sbi->sectors_written_start) >> 1; in do_checkpoint()
1580 f2fs_write_node_summaries(sbi, start_blk); in do_checkpoint()
1585 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); in do_checkpoint()
1587 f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META); in do_checkpoint()
1590 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); in do_checkpoint()
1593 err = f2fs_flush_device_cache(sbi); in do_checkpoint()
1598 commit_checkpoint(sbi, ckpt, start_blk); in do_checkpoint()
1599 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); in do_checkpoint()
1605 if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) || in do_checkpoint()
1606 f2fs_sb_has_compression(sbi)) in do_checkpoint()
1607 f2fs_bug_on(sbi, in do_checkpoint()
1608 invalidate_inode_pages2_range(META_MAPPING(sbi), in do_checkpoint()
1609 MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1)); in do_checkpoint()
1611 f2fs_release_ino_entry(sbi, false); in do_checkpoint()
1613 f2fs_reset_fsync_node_info(sbi); in do_checkpoint()
1615 clear_sbi_flag(sbi, SBI_IS_DIRTY); in do_checkpoint()
1616 clear_sbi_flag(sbi, SBI_NEED_CP); in do_checkpoint()
1617 clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); in do_checkpoint()
1619 spin_lock(&sbi->stat_lock); in do_checkpoint()
1620 sbi->unusable_block_count = 0; in do_checkpoint()
1621 spin_unlock(&sbi->stat_lock); in do_checkpoint()
1623 __set_cp_next_pack(sbi); in do_checkpoint()
1629 if (get_pages(sbi, F2FS_DIRTY_NODES) || in do_checkpoint()
1630 get_pages(sbi, F2FS_DIRTY_IMETA)) in do_checkpoint()
1631 set_sbi_flag(sbi, SBI_IS_DIRTY); in do_checkpoint()
1633 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS)); in do_checkpoint()
1635 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0; in do_checkpoint()
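
The do_checkpoint() hits above trace how one checkpoint pack is assembled; a comment-form layout map in the order of the matched calls (the conditional cases and the kbytes_written bookkeeping are simplified):

        /*
         * Layout of one checkpoint pack as written by do_checkpoint():
         *
         *   start_blk + 0              checkpoint header block (ckpt, with crc32)
         *   + 1 .. __cp_payload()      extra checkpoint payload blocks
         *   + orphan blocks            write_orphan_inodes(), only if orphans exist
         *   + data summaries           f2fs_write_data_summaries()
         *   + node summaries           f2fs_write_node_summaries(), only when needed
         *   last block of the pack     second copy of ckpt, written and flushed
         *                              by commit_checkpoint()
         *   tail of the segment        nat_bits blocks, when CP_NAT_BITS_FLAG is set
         */
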
1638 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) in f2fs_write_checkpoint() argument
1640 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in f2fs_write_checkpoint()
1644 if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi)) in f2fs_write_checkpoint()
1647 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in f2fs_write_checkpoint()
1650 f2fs_warn(sbi, "Start checkpoint disabled!"); in f2fs_write_checkpoint()
1653 f2fs_down_write(&sbi->cp_global_sem); in f2fs_write_checkpoint()
1655 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && in f2fs_write_checkpoint()
1657 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks))) in f2fs_write_checkpoint()
1659 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_write_checkpoint()
1664 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); in f2fs_write_checkpoint()
1666 err = block_operations(sbi); in f2fs_write_checkpoint()
1670 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops"); in f2fs_write_checkpoint()
1672 f2fs_flush_merged_writes(sbi); in f2fs_write_checkpoint()
1676 if (!f2fs_exist_trim_candidates(sbi, cpc)) { in f2fs_write_checkpoint()
1677 unblock_operations(sbi); in f2fs_write_checkpoint()
1681 if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 && in f2fs_write_checkpoint()
1682 SIT_I(sbi)->dirty_sentries == 0 && in f2fs_write_checkpoint()
1683 prefree_segments(sbi) == 0) { in f2fs_write_checkpoint()
1684 f2fs_flush_sit_entries(sbi, cpc); in f2fs_write_checkpoint()
1685 f2fs_clear_prefree_segments(sbi, cpc); in f2fs_write_checkpoint()
1686 unblock_operations(sbi); in f2fs_write_checkpoint()
1700 err = f2fs_flush_nat_entries(sbi, cpc); in f2fs_write_checkpoint()
1702 f2fs_err(sbi, "f2fs_flush_nat_entries failed err:%d, stop checkpoint", err); in f2fs_write_checkpoint()
1703 f2fs_bug_on(sbi, !f2fs_cp_error(sbi)); in f2fs_write_checkpoint()
1707 f2fs_flush_sit_entries(sbi, cpc); in f2fs_write_checkpoint()
1710 f2fs_save_inmem_curseg(sbi); in f2fs_write_checkpoint()
1712 err = do_checkpoint(sbi, cpc); in f2fs_write_checkpoint()
1714 f2fs_err(sbi, "do_checkpoint failed err:%d, stop checkpoint", err); in f2fs_write_checkpoint()
1715 f2fs_bug_on(sbi, !f2fs_cp_error(sbi)); in f2fs_write_checkpoint()
1716 f2fs_release_discard_addrs(sbi); in f2fs_write_checkpoint()
1718 f2fs_clear_prefree_segments(sbi, cpc); in f2fs_write_checkpoint()
1721 f2fs_restore_inmem_curseg(sbi); in f2fs_write_checkpoint()
1722 f2fs_reinit_atgc_curseg(sbi); in f2fs_write_checkpoint()
1723 stat_inc_cp_count(sbi); in f2fs_write_checkpoint()
1725 unblock_operations(sbi); in f2fs_write_checkpoint()
1728 f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver); in f2fs_write_checkpoint()
1731 f2fs_update_time(sbi, CP_TIME); in f2fs_write_checkpoint()
1732 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); in f2fs_write_checkpoint()
1735 f2fs_up_write(&sbi->cp_global_sem); in f2fs_write_checkpoint()
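
Taken together, the f2fs_write_checkpoint() hits outline the whole checkpoint flow; a skeleton assembled from the matched lines (early exits, tracing, statistics and the exact error handling are abridged, and the goto labels are reconstructed):

int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
        int err;

        f2fs_down_write(&sbi->cp_global_sem);

        err = block_operations(sbi);    /* freeze fs ops; flush dirty dents, imeta, nodes */
        if (err)
                goto out;

        f2fs_flush_merged_writes(sbi);

        err = f2fs_flush_nat_entries(sbi, cpc);         /* write back dirty NAT entries */
        if (err)
                goto stop;
        f2fs_flush_sit_entries(sbi, cpc);               /* write back dirty SIT entries */

        f2fs_save_inmem_curseg(sbi);
        err = do_checkpoint(sbi, cpc);                  /* write the checkpoint pack itself */
        if (err)
                f2fs_release_discard_addrs(sbi);
        else
                f2fs_clear_prefree_segments(sbi, cpc);
        f2fs_restore_inmem_curseg(sbi);
stop:
        unblock_operations(sbi);
        f2fs_update_time(sbi, CP_TIME);
out:
        f2fs_up_write(&sbi->cp_global_sem);
        return err;
}
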
1739 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi) in f2fs_init_ino_entry_info() argument
1744 struct inode_management *im = &sbi->im[i]; in f2fs_init_ino_entry_info()
1752 sbi->max_orphans = (BLKS_PER_SEG(sbi) - F2FS_CP_PACKS - in f2fs_init_ino_entry_info()
1753 NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) * in f2fs_init_ino_entry_info()
1778 static int __write_checkpoint_sync(struct f2fs_sb_info *sbi) in __write_checkpoint_sync() argument
1783 f2fs_down_write(&sbi->gc_lock); in __write_checkpoint_sync()
1784 err = f2fs_write_checkpoint(sbi, &cpc); in __write_checkpoint_sync()
1785 f2fs_up_write(&sbi->gc_lock); in __write_checkpoint_sync()
1790 static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi) in __checkpoint_and_complete_reqs() argument
1792 struct ckpt_req_control *cprc = &sbi->cprc_info; in __checkpoint_and_complete_reqs()
1803 ret = __write_checkpoint_sync(sbi); in __checkpoint_and_complete_reqs()
1826 struct f2fs_sb_info *sbi = data; in issue_checkpoint_thread() local
1827 struct ckpt_req_control *cprc = &sbi->cprc_info; in issue_checkpoint_thread()
1834 __checkpoint_and_complete_reqs(sbi); in issue_checkpoint_thread()
1841 static void flush_remained_ckpt_reqs(struct f2fs_sb_info *sbi, in flush_remained_ckpt_reqs() argument
1844 struct ckpt_req_control *cprc = &sbi->cprc_info; in flush_remained_ckpt_reqs()
1847 __checkpoint_and_complete_reqs(sbi); in flush_remained_ckpt_reqs()
1863 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi) in f2fs_issue_checkpoint() argument
1865 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_issue_checkpoint()
1869 cpc.reason = __get_cp_reason(sbi); in f2fs_issue_checkpoint()
1870 if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) { in f2fs_issue_checkpoint()
1873 f2fs_down_write(&sbi->gc_lock); in f2fs_issue_checkpoint()
1874 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_issue_checkpoint()
1875 f2fs_up_write(&sbi->gc_lock); in f2fs_issue_checkpoint()
1881 return __write_checkpoint_sync(sbi); in f2fs_issue_checkpoint()
1901 flush_remained_ckpt_reqs(sbi, &req); in f2fs_issue_checkpoint()
1906 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi) in f2fs_start_ckpt_thread() argument
1908 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_start_ckpt_thread()
1909 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_start_ckpt_thread()
1914 cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi, in f2fs_start_ckpt_thread()
1928 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi) in f2fs_stop_ckpt_thread() argument
1930 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_stop_ckpt_thread()
1940 f2fs_flush_ckpt_thread(sbi); in f2fs_stop_ckpt_thread()
1943 void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi) in f2fs_flush_ckpt_thread() argument
1945 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_flush_ckpt_thread()
1947 flush_remained_ckpt_reqs(sbi, NULL); in f2fs_flush_ckpt_thread()
1954 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi) in f2fs_init_ckpt_req_control() argument
1956 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_init_ckpt_req_control()
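
Finally, the ckpt_req_control hits sketch the checkpoint merging daemon: f2fs_issue_checkpoint() either checkpoints inline or hands a request to issue_checkpoint_thread(). A condensed sketch (the struct ckpt_req fields, the llist/completion plumbing and the memory-barrier details are abridged from the upstream source and should be treated as assumptions):

int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
{
        struct ckpt_req_control *cprc = &sbi->cprc_info;
        struct ckpt_req req;
        struct cp_control cpc;

        cpc.reason = __get_cp_reason(sbi);

        /* merging disabled, or not a plain sync: checkpoint inline */
        if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
                int ret;

                f2fs_down_write(&sbi->gc_lock);
                ret = f2fs_write_checkpoint(sbi, &cpc);
                f2fs_up_write(&sbi->gc_lock);
                return ret;
        }

        /* daemon not running (e.g. around mount/umount): fall back to sync */
        if (!cprc->f2fs_issue_ckpt)
                return __write_checkpoint_sync(sbi);

        /* queue the request and wait; one run of the daemon
         * (issue_checkpoint_thread -> __checkpoint_and_complete_reqs)
         * completes every request queued so far */
        init_completion(&req.wait);
        llist_add(&req.llnode, &cprc->issue_list);
        wake_up(&cprc->ckpt_wait_queue);
        wait_for_completion(&req.wait);

        return req.ret;
}
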