Lines matching "+full:im +full:-" in the f2fs checkpoint code (fs/f2fs/checkpoint.c).
Each match is shown as: <source line number> <matched source> in <containing function>; declaration sites carry a trailing "local" tag.

1 // SPDX-License-Identifier: GPL-2.0
104 return ERR_PTR(-EIO); in __get_meta_folio()
123 if (PTR_ERR(folio) == -EIO && in f2fs_get_meta_folio_retry()
151 exist = f2fs_test_bit(offset, se->cur_valid_map); in __is_bitmap_valid()
186 blkaddr < SM_I(sbi)->ssa_blkaddr)) in __f2fs_is_valid_blkaddr()
190 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr || in __f2fs_is_valid_blkaddr()
272 for (; nrpages-- > 0; blkno++) { in f2fs_ra_meta_pages()
281 NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))) in f2fs_ra_meta_pages()
322 return blkno - start; in f2fs_ra_meta_pages()
388 if (wbc->sync_mode != WB_SYNC_ALL && in f2fs_write_meta_pages()
394 if (!f2fs_down_write_trylock(&sbi->cp_global_sem)) in f2fs_write_meta_pages()
397 trace_f2fs_writepages(mapping->host, wbc, META); in f2fs_write_meta_pages()
399 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); in f2fs_write_meta_pages()
400 f2fs_up_write(&sbi->cp_global_sem); in f2fs_write_meta_pages()
401 wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff); in f2fs_write_meta_pages()
405 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META); in f2fs_write_meta_pages()
406 trace_f2fs_writepages(mapping->host, wbc, META); in f2fs_write_meta_pages()
426 (pgoff_t)-1, in f2fs_sync_meta_pages()
434 folio->index != prev + in f2fs_sync_meta_pages()
435 folio_nr_pages(fbatch.folios[i-1])) { in f2fs_sync_meta_pages()
463 prev = folio->index; in f2fs_sync_meta_pages()
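
The matches at 434-435 and 463 are the write-batching heuristic in f2fs_sync_meta_pages(): a run of folios is submitted whenever the next folio in the batch does not directly follow the previous one. A minimal userspace sketch of that run detection, with hypothetical names and printf standing in for bio submission:

#include <stdio.h>

struct folio { unsigned long index, nr_pages; };

static void flush_run(unsigned long start, unsigned long end)
{
	printf("write pages [%lu, %lu)\n", start, end);
}

/* Flush folios in contiguous runs; start a new run at every gap. */
static void sync_folios(const struct folio *f, int n)
{
	unsigned long start, prev_end;
	int i;

	if (n == 0)
		return;
	start = f[0].index;
	prev_end = f[0].index + f[0].nr_pages;
	for (i = 1; i < n; i++) {
		if (f[i].index != prev_end) {	/* the gap check from line 434 */
			flush_run(start, prev_end);
			start = f[i].index;
		}
		prev_end = f[i].index + f[i].nr_pages;
	}
	flush_run(start, prev_end);		/* flush the final run */
}

int main(void)
{
	struct folio batch[] = { {0, 1}, {1, 4}, {5, 1}, {9, 2} };

	sync_folios(batch, 4);	/* prints runs [0,6) and [9,11) */
	return 0;
}
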
505 struct inode_management *im = &sbi->im[type]; in __add_ino_entry() local
511 e = radix_tree_lookup(&im->ino_root, ino); in __add_ino_entry()
523 spin_lock(&im->ino_lock); in __add_ino_entry()
524 e = radix_tree_lookup(&im->ino_root, ino); in __add_ino_entry()
527 spin_unlock(&im->ino_lock); in __add_ino_entry()
532 if (unlikely(radix_tree_insert(&im->ino_root, ino, e))) in __add_ino_entry()
536 e->ino = ino; in __add_ino_entry()
538 list_add_tail(&e->list, &im->ino_list); in __add_ino_entry()
540 im->ino_num++; in __add_ino_entry()
544 f2fs_set_bit(devidx, (char *)&e->dirty_device); in __add_ino_entry()
546 spin_unlock(&im->ino_lock); in __add_ino_entry()
555 struct inode_management *im = &sbi->im[type]; in __remove_ino_entry() local
558 spin_lock(&im->ino_lock); in __remove_ino_entry()
559 e = radix_tree_lookup(&im->ino_root, ino); in __remove_ino_entry()
561 list_del(&e->list); in __remove_ino_entry()
562 radix_tree_delete(&im->ino_root, ino); in __remove_ino_entry()
563 im->ino_num--; in __remove_ino_entry()
564 spin_unlock(&im->ino_lock); in __remove_ino_entry()
568 spin_unlock(&im->ino_lock); in __remove_ino_entry()
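
Lines 505-568 show the per-type inode_management pattern: each entry lives in both a radix tree keyed by ino and a list, guarded by im->ino_lock, and __add_ino_entry() does an optimistic lookup outside the lock (511) followed by a re-check under it (524) before inserting. A minimal userspace sketch of the locked lookup-then-insert/remove part, with hypothetical names, a pthread mutex standing in for the spinlock, and a singly-linked list standing in for the radix tree:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ino_entry {
	uint32_t ino;
	struct ino_entry *next;
};

struct inode_management {
	pthread_mutex_t ino_lock;
	struct ino_entry *ino_list;	/* stands in for both ino_root and ino_list */
	unsigned long ino_num;
};

static struct ino_entry *lookup(struct inode_management *im, uint32_t ino)
{
	struct ino_entry *e;

	for (e = im->ino_list; e; e = e->next)
		if (e->ino == ino)
			return e;
	return NULL;
}

/* Re-check under the lock before inserting, as __add_ino_entry() does. */
static void add_ino_entry(struct inode_management *im, uint32_t ino)
{
	struct ino_entry *e;

	pthread_mutex_lock(&im->ino_lock);
	e = lookup(im, ino);
	if (!e) {
		e = calloc(1, sizeof(*e));
		if (e) {
			e->ino = ino;
			e->next = im->ino_list;
			im->ino_list = e;
			im->ino_num++;
		}
	}
	pthread_mutex_unlock(&im->ino_lock);
}

static void remove_ino_entry(struct inode_management *im, uint32_t ino)
{
	struct ino_entry **p, *e;

	pthread_mutex_lock(&im->ino_lock);
	for (p = &im->ino_list; (e = *p); p = &e->next) {
		if (e->ino == ino) {
			*p = e->next;	/* unlink, mirroring list_del() */
			free(e);
			im->ino_num--;
			break;
		}
	}
	pthread_mutex_unlock(&im->ino_lock);
}

int main(void)
{
	struct inode_management im = { .ino_lock = PTHREAD_MUTEX_INITIALIZER };

	add_ino_entry(&im, 42);
	add_ino_entry(&im, 42);			/* duplicate add is a no-op */
	printf("ino_num = %lu\n", im.ino_num);	/* 1 */
	remove_ino_entry(&im, 42);
	printf("ino_num = %lu\n", im.ino_num);	/* 0 */
	return 0;
}
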
586 struct inode_management *im = &sbi->im[mode]; in f2fs_exist_written_data() local
589 spin_lock(&im->ino_lock); in f2fs_exist_written_data()
590 e = radix_tree_lookup(&im->ino_root, ino); in f2fs_exist_written_data()
591 spin_unlock(&im->ino_lock); in f2fs_exist_written_data()
601 struct inode_management *im = &sbi->im[i]; in f2fs_release_ino_entry() local
603 spin_lock(&im->ino_lock); in f2fs_release_ino_entry()
604 list_for_each_entry_safe(e, tmp, &im->ino_list, list) { in f2fs_release_ino_entry()
605 list_del(&e->list); in f2fs_release_ino_entry()
606 radix_tree_delete(&im->ino_root, e->ino); in f2fs_release_ino_entry()
608 im->ino_num--; in f2fs_release_ino_entry()
610 spin_unlock(&im->ino_lock); in f2fs_release_ino_entry()
623 struct inode_management *im = &sbi->im[type]; in f2fs_is_dirty_device() local
627 spin_lock(&im->ino_lock); in f2fs_is_dirty_device()
628 e = radix_tree_lookup(&im->ino_root, ino); in f2fs_is_dirty_device()
629 if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device)) in f2fs_is_dirty_device()
631 spin_unlock(&im->ino_lock); in f2fs_is_dirty_device()
637 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_acquire_orphan_inode() local
640 spin_lock(&im->ino_lock); in f2fs_acquire_orphan_inode()
643 spin_unlock(&im->ino_lock); in f2fs_acquire_orphan_inode()
644 return -ENOSPC; in f2fs_acquire_orphan_inode()
647 if (unlikely(im->ino_num >= sbi->max_orphans)) in f2fs_acquire_orphan_inode()
648 err = -ENOSPC; in f2fs_acquire_orphan_inode()
650 im->ino_num++; in f2fs_acquire_orphan_inode()
651 spin_unlock(&im->ino_lock); in f2fs_acquire_orphan_inode()
658 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_release_orphan_inode() local
660 spin_lock(&im->ino_lock); in f2fs_release_orphan_inode()
661 f2fs_bug_on(sbi, im->ino_num == 0); in f2fs_release_orphan_inode()
662 im->ino_num--; in f2fs_release_orphan_inode()
663 spin_unlock(&im->ino_lock); in f2fs_release_orphan_inode()
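
The acquire/release pair above (640-663) is simple quota accounting: an orphan slot is reserved under im->ino_lock, refused with -ENOSPC once ino_num reaches max_orphans, and given back under the same lock. A compact userspace sketch of that pattern (hypothetical names):

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ino_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long ino_num;
static unsigned long max_orphans = 2;	/* small quota for the demo */

static int acquire_orphan_slot(void)
{
	int err = 0;

	pthread_mutex_lock(&ino_lock);
	if (ino_num >= max_orphans)
		err = -ENOSPC;		/* orphan area is full: refuse */
	else
		ino_num++;
	pthread_mutex_unlock(&ino_lock);
	return err;
}

static void release_orphan_slot(void)
{
	pthread_mutex_lock(&ino_lock);
	assert(ino_num > 0);		/* mirrors f2fs_bug_on() */
	ino_num--;
	pthread_mutex_unlock(&ino_lock);
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("acquire -> %d\n", acquire_orphan_slot());	/* 0, 0, -28 */
	release_orphan_slot();
	return 0;
}
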
669 __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO); in f2fs_add_orphan_inode()
685 inode = f2fs_iget_retry(sbi->sb, ino); in recover_orphan_inode()
691 f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT); in recover_orphan_inode()
712 err = -EIO; in recover_orphan_inode()
741 orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi); in f2fs_recover_orphan_inodes()
756 for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) { in f2fs_recover_orphan_inodes()
757 nid_t ino = le32_to_cpu(orphan_blk->ino[j]); in f2fs_recover_orphan_inodes()
784 struct inode_management *im = &sbi->im[ORPHAN_INO]; in write_orphan_inodes() local
786 orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num); in write_orphan_inodes()
789 * we don't need to do spin_lock(&im->ino_lock) here, since all the in write_orphan_inodes()
793 head = &im->ino_list; in write_orphan_inodes()
803 orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino); in write_orphan_inodes()
811 orphan_blk->blk_addr = cpu_to_le16(index); in write_orphan_inodes()
812 orphan_blk->blk_count = cpu_to_le16(orphan_blocks); in write_orphan_inodes()
813 orphan_blk->entry_count = cpu_to_le32(nentries); in write_orphan_inodes()
823 orphan_blk->blk_addr = cpu_to_le16(index); in write_orphan_inodes()
824 orphan_blk->blk_count = cpu_to_le16(orphan_blocks); in write_orphan_inodes()
825 orphan_blk->entry_count = cpu_to_le32(nentries); in write_orphan_inodes()
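
Lines 786-825 pack the in-memory orphan list into fixed-capacity on-disk blocks: each full block, and a final partial one, is stamped with its index (blk_addr), the total block count (blk_count), and its entry count before being written out. A userspace sketch of the same packing loop, assuming hypothetical names and a tiny per-block capacity so the demo spills over:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ORPHANS_PER_BLOCK 4	/* tiny stand-in for F2FS_ORPHANS_PER_BLOCK */

struct orphan_block {
	uint32_t ino[ORPHANS_PER_BLOCK];
	uint16_t blk_addr;	/* 1-based index of this block in the pack */
	uint16_t blk_count;	/* total orphan blocks in the pack */
	uint32_t entry_count;	/* valid entries in this block */
};

static void flush_block(const struct orphan_block *blk)
{
	printf("block %u/%u: %u entries\n",
	       blk->blk_addr, blk->blk_count, blk->entry_count);
}

static void write_orphans(const uint32_t *inos, unsigned int n)
{
	unsigned int blocks = (n + ORPHANS_PER_BLOCK - 1) / ORPHANS_PER_BLOCK;
	struct orphan_block blk = { .blk_count = blocks };
	unsigned int i, nentries = 0, index = 1;

	for (i = 0; i < n; i++) {
		blk.ino[nentries++] = inos[i];
		if (nentries == ORPHANS_PER_BLOCK) {
			/* block is full: stamp the header and write it out */
			blk.blk_addr = index++;
			blk.entry_count = nentries;
			flush_block(&blk);
			memset(blk.ino, 0, sizeof(blk.ino));	/* fresh block */
			nentries = 0;
		}
	}
	if (nentries) {			/* trailing, partially-filled block */
		blk.blk_addr = index;
		blk.entry_count = nentries;
		flush_block(&blk);
	}
}

int main(void)
{
	uint32_t inos[] = { 3, 5, 8, 13, 21, 34 };

	write_orphans(inos, 6);		/* two blocks: 4 + 2 entries */
	return 0;
}
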
833 unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset); in f2fs_checkpoint_chksum()
840 F2FS_BLKSIZE - chksum_ofs); in f2fs_checkpoint_chksum()
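
The two matches from f2fs_checkpoint_chksum() (833, 840) show that the checkpoint checksum covers the block around the stored checksum field: first the bytes up to checksum_offset, then the tail after the field, which is where the F2FS_BLKSIZE - chksum_ofs length comes from. A sketch of that skip-the-field checksum, with a toy rolling hash standing in for the real f2fs_crc32():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy rolling checksum standing in for f2fs_crc32(); not the real CRC. */
static uint32_t chksum(uint32_t seed, const uint8_t *p, size_t len)
{
	while (len--)
		seed = seed * 31 + *p++;
	return seed;
}

/*
 * Checksum a block that stores its own checksum at @ofs: cover the
 * bytes before the field, skip the field itself, then cover the tail
 * (the "F2FS_BLKSIZE - chksum_ofs" piece, after the offset has been
 * advanced past the stored field).
 */
static uint32_t block_chksum(const uint8_t *blk, size_t blksize, size_t ofs)
{
	uint32_t c = chksum(0, blk, ofs);

	if (ofs + sizeof(uint32_t) < blksize)
		c = chksum(c, blk + ofs + sizeof(uint32_t),
			   blksize - ofs - sizeof(uint32_t));
	return c;
}

int main(void)
{
	uint8_t blk[4096];

	memset(blk, 0xab, sizeof(blk));
	printf("chksum=%08x\n", block_chksum(blk, sizeof(blk), 4092));
	return 0;
}
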
858 crc_offset = le32_to_cpu((*cp_block)->checksum_offset); in get_checkpoint_version()
863 return -EINVAL; in get_checkpoint_version()
870 return -EINVAL; in get_checkpoint_version()
891 cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count); in validate_checkpoint()
895 le32_to_cpu(cp_block->cp_pack_total_block_count)); in validate_checkpoint()
900 cp_addr += cp_blocks - 1; in validate_checkpoint()
921 struct f2fs_super_block *fsb = sbi->raw_super; in f2fs_get_valid_checkpoint()
923 unsigned long blk_size = sbi->blocksize; in f2fs_get_valid_checkpoint()
931 sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), in f2fs_get_valid_checkpoint()
933 if (!sbi->ckpt) in f2fs_get_valid_checkpoint()
934 return -ENOMEM; in f2fs_get_valid_checkpoint()
939 cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr); in f2fs_get_valid_checkpoint()
944 le32_to_cpu(fsb->log_blocks_per_seg); in f2fs_get_valid_checkpoint()
957 err = -EFSCORRUPTED; in f2fs_get_valid_checkpoint()
962 memcpy(sbi->ckpt, cp_block, blk_size); in f2fs_get_valid_checkpoint()
965 sbi->cur_cp_pack = 1; in f2fs_get_valid_checkpoint()
967 sbi->cur_cp_pack = 2; in f2fs_get_valid_checkpoint()
971 err = -EFSCORRUPTED; in f2fs_get_valid_checkpoint()
978 cp_blk_no = le32_to_cpu(fsb->cp_blkaddr); in f2fs_get_valid_checkpoint()
980 cp_blk_no += BIT(le32_to_cpu(fsb->log_blocks_per_seg)); in f2fs_get_valid_checkpoint()
984 unsigned char *ckpt = (unsigned char *)sbi->ckpt; in f2fs_get_valid_checkpoint()
1004 kvfree(sbi->ckpt); in f2fs_get_valid_checkpoint()
1017 list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]); in __add_dirty_inode()
1028 list_del_init(&F2FS_I(inode)->dirty_list); in __remove_dirty_inode()
1036 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; in f2fs_update_dirty_folio()
1038 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && in f2fs_update_dirty_folio()
1039 !S_ISLNK(inode->i_mode)) in f2fs_update_dirty_folio()
1042 spin_lock(&sbi->inode_lock[type]); in f2fs_update_dirty_folio()
1046 spin_unlock(&sbi->inode_lock[type]); in f2fs_update_dirty_folio()
1054 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; in f2fs_remove_dirty_inode()
1056 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && in f2fs_remove_dirty_inode()
1057 !S_ISLNK(inode->i_mode)) in f2fs_remove_dirty_inode()
1063 spin_lock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
1065 spin_unlock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
1077 trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1082 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1085 return -EIO; in f2fs_sync_dirty_inodes()
1088 spin_lock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1090 head = &sbi->inode_list[type]; in f2fs_sync_dirty_inodes()
1092 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1093 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1099 inode = igrab(&fi->vfs_inode); in f2fs_sync_dirty_inodes()
1100 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1102 unsigned long cur_ino = inode->i_ino; in f2fs_sync_dirty_inodes()
1105 F2FS_I(inode)->cp_task = current; in f2fs_sync_dirty_inodes()
1106 F2FS_I(inode)->wb_task = current; in f2fs_sync_dirty_inodes()
1108 filemap_fdatawrite(inode->i_mapping); in f2fs_sync_dirty_inodes()
1110 F2FS_I(inode)->wb_task = NULL; in f2fs_sync_dirty_inodes()
1112 F2FS_I(inode)->cp_task = NULL; in f2fs_sync_dirty_inodes()
1133 struct list_head *head = &sbi->inode_list[DIRTY_META]; in f2fs_sync_inode_meta()
1138 while (total--) { in f2fs_sync_inode_meta()
1140 return -EIO; in f2fs_sync_inode_meta()
1142 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1144 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1149 inode = igrab(&fi->vfs_inode); in f2fs_sync_inode_meta()
1150 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1167 nid_t last_nid = nm_i->next_scan_nid; in __prepare_cp_block()
1170 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); in __prepare_cp_block()
1171 ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); in __prepare_cp_block()
1172 ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); in __prepare_cp_block()
1173 ckpt->next_free_nid = cpu_to_le32(last_nid); in __prepare_cp_block()
1176 sbi->last_valid_block_count = sbi->total_valid_block_count; in __prepare_cp_block()
1177 percpu_counter_set(&sbi->alloc_valid_block_count, 0); in __prepare_cp_block()
1178 percpu_counter_set(&sbi->rf_node_block_count, 0); in __prepare_cp_block()
1188 if (!f2fs_down_write_trylock(&sbi->quota_sem)) in __need_flush_quota()
1200 f2fs_up_write(&sbi->quota_sem); in __need_flush_quota()
1205 * Freeze all the FS-operations for checkpoint.
1223 bool need_lock = sbi->umount_lock_holder != current; in block_operations()
1234 f2fs_do_quota_sync(sbi->sb, -1); in block_operations()
1235 } else if (down_read_trylock(&sbi->sb->s_umount)) { in block_operations()
1236 f2fs_do_quota_sync(sbi->sb, -1); in block_operations()
1237 up_read(&sbi->sb->s_umount); in block_operations()
1256 * until finishing nat/sit flush. inode->i_blocks can be updated. in block_operations()
1258 f2fs_down_write(&sbi->node_change); in block_operations()
1261 f2fs_up_write(&sbi->node_change); in block_operations()
1271 f2fs_down_write(&sbi->node_write); in block_operations()
1274 f2fs_up_write(&sbi->node_write); in block_operations()
1275 atomic_inc(&sbi->wb_sync_req[NODE]); in block_operations()
1277 atomic_dec(&sbi->wb_sync_req[NODE]); in block_operations()
1279 f2fs_up_write(&sbi->node_change); in block_operations()
1288 * sbi->node_change is used only for AIO write_begin path which produces in block_operations()
1292 f2fs_up_write(&sbi->node_change); in block_operations()
1298 f2fs_up_write(&sbi->node_write); in unblock_operations()
1320 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); in f2fs_wait_on_all_pages()
1323 finish_wait(&sbi->cp_wait, &wait); in f2fs_wait_on_all_pages()
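
The prepare_to_wait()/finish_wait() pair (1320, 1323) is the classic waitqueue idiom: sleep on sbi->cp_wait until outstanding meta writeback drains. A rough pthread translation of that loop, with hypothetical names; the kernel re-tests its condition between prepare_to_wait() and schedule():

#include <pthread.h>

static pthread_mutex_t meta_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cp_wait = PTHREAD_COND_INITIALIZER;
static unsigned long dirty_meta_pages;

/* Sleep until all outstanding meta writeback has completed. */
static void wait_on_all_pages(void)
{
	pthread_mutex_lock(&meta_lock);
	while (dirty_meta_pages > 0)	/* re-test after every wakeup */
		pthread_cond_wait(&cp_wait, &meta_lock);
	pthread_mutex_unlock(&meta_lock);
}

/* Called as each meta write completes; wakes waiters on the last one. */
static void end_meta_write(void)
{
	pthread_mutex_lock(&meta_lock);
	if (--dirty_meta_pages == 0)
		pthread_cond_broadcast(&cp_wait);	/* wake_up(&sbi->cp_wait) */
	pthread_mutex_unlock(&meta_lock);
}

int main(void)
{
	dirty_meta_pages = 1;
	end_meta_write();	/* last write completes, waiters wake */
	wait_on_all_pages();	/* returns immediately: nothing dirty */
	return 0;
}
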
1328 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; in update_ckpt_flags()
1332 spin_lock_irqsave(&sbi->cp_lock, flags); in update_ckpt_flags()
1334 if ((cpc->reason & CP_UMOUNT) && in update_ckpt_flags()
1335 le32_to_cpu(ckpt->cp_pack_total_block_count) > in update_ckpt_flags()
1336 sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks) in update_ckpt_flags()
1339 if (cpc->reason & CP_TRIMMED) in update_ckpt_flags()
1344 if (cpc->reason & CP_UMOUNT) in update_ckpt_flags()
1349 if (cpc->reason & CP_FASTBOOT) in update_ckpt_flags()
1389 spin_unlock_irqrestore(&sbi->cp_lock, flags); in update_ckpt_flags()
1436 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_get_sectors_written()
1442 return get_sectors_written(sbi->sb->s_bdev); in f2fs_get_sectors_written()
1447 cpc->stats.times[type] = ktime_get(); in stat_cp_time()
1455 sb_diff = (u64)ktime_ms_delta(sbi->cp_stats.times[CP_TIME_END], in check_cp_time()
1456 sbi->cp_stats.times[CP_TIME_START]); in check_cp_time()
1457 cur_diff = (u64)ktime_ms_delta(cpc->stats.times[CP_TIME_END], in check_cp_time()
1458 cpc->stats.times[CP_TIME_START]); in check_cp_time()
1461 sbi->cp_stats = cpc->stats; in check_cp_time()
1466 for (ct = CP_TIME_START; ct < CP_TIME_MAX - 1; ct++) in check_cp_time()
1468 (u64)ktime_ms_delta(cpc->stats.times[ct + 1], in check_cp_time()
1469 cpc->stats.times[ct])); in check_cp_time()
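
stat_cp_time()/check_cp_time() (1447-1469) timestamp each checkpoint phase with ktime_get(), keep the stats of the slowest checkpoint seen so far, and report the delta between each adjacent pair of phases. A small clock_gettime() sketch of that per-phase accounting; the intermediate phase names are invented for the example:

#include <stdio.h>
#include <time.h>

/* CP_TIME_START and CP_TIME_END appear in the listing; the middle
 * phases here are invented for this example. */
enum cp_time { CP_TIME_START, CP_TIME_LOCK, CP_TIME_FLUSH, CP_TIME_END, CP_TIME_MAX };

static long long times_ms[CP_TIME_MAX];

static void stat_cp_time(enum cp_time t)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	times_ms[t] = ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Report the delta between each adjacent pair of phases, as the
 * ktime_ms_delta() loop in check_cp_time() does. */
static void report_cp_time(void)
{
	int t;

	for (t = CP_TIME_START; t < CP_TIME_MAX - 1; t++)
		printf("phase %d -> %d: %lld ms\n", t, t + 1,
		       times_ms[t + 1] - times_ms[t]);
}

int main(void)
{
	int t;

	for (t = CP_TIME_START; t < CP_TIME_MAX; t++)
		stat_cp_time((enum cp_time)t);
	report_cp_time();
	return 0;
}
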
1477 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags; in do_checkpoint()
1493 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true)); in do_checkpoint()
1494 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); in do_checkpoint()
1498 ckpt->cur_node_segno[i] = cpu_to_le32(curseg->segno); in do_checkpoint()
1499 ckpt->cur_node_blkoff[i] = cpu_to_le16(curseg->next_blkoff); in do_checkpoint()
1500 ckpt->alloc_type[i + CURSEG_HOT_NODE] = curseg->alloc_type; in do_checkpoint()
1505 ckpt->cur_data_segno[i] = cpu_to_le32(curseg->segno); in do_checkpoint()
1506 ckpt->cur_data_blkoff[i] = cpu_to_le16(curseg->next_blkoff); in do_checkpoint()
1507 ckpt->alloc_type[i + CURSEG_HOT_DATA] = curseg->alloc_type; in do_checkpoint()
1512 spin_lock_irqsave(&sbi->cp_lock, flags); in do_checkpoint()
1517 spin_unlock_irqrestore(&sbi->cp_lock, flags); in do_checkpoint()
1520 ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks + in do_checkpoint()
1523 if (__remain_node_summaries(cpc->reason)) in do_checkpoint()
1524 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + in do_checkpoint()
1528 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + in do_checkpoint()
1541 le32_to_cpu(ckpt->checksum_offset))) in do_checkpoint()
1552 *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver); in do_checkpoint()
1554 blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks; in do_checkpoint()
1555 for (i = 0; i < nm_i->nat_bits_blocks; i++) in do_checkpoint()
1556 f2fs_update_meta_page(sbi, nm_i->nat_bits + in do_checkpoint()
1576 kbytes_written = sbi->kbytes_written; in do_checkpoint()
1577 kbytes_written += (f2fs_get_sectors_written(sbi) - in do_checkpoint()
1578 sbi->sectors_written_start) >> 1; in do_checkpoint()
1579 seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written); in do_checkpoint()
1581 if (__remain_node_summaries(cpc->reason)) { in do_checkpoint()
1617 MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1)); in do_checkpoint()
1627 spin_lock(&sbi->stat_lock); in do_checkpoint()
1628 sbi->unusable_block_count = 0; in do_checkpoint()
1629 spin_unlock(&sbi->stat_lock); in do_checkpoint()
1643 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0; in do_checkpoint()
1654 if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi)) in f2fs_write_checkpoint()
1655 return -EROFS; in f2fs_write_checkpoint()
1658 if (cpc->reason != CP_PAUSE) in f2fs_write_checkpoint()
1662 if (cpc->reason != CP_RESIZE) in f2fs_write_checkpoint()
1663 f2fs_down_write(&sbi->cp_global_sem); in f2fs_write_checkpoint()
1668 ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) || in f2fs_write_checkpoint()
1669 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks))) in f2fs_write_checkpoint()
1672 err = -EIO; in f2fs_write_checkpoint()
1676 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); in f2fs_write_checkpoint()
1684 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops"); in f2fs_write_checkpoint()
1689 if (cpc->reason & CP_DISCARD) { in f2fs_write_checkpoint()
1695 if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 && in f2fs_write_checkpoint()
1696 SIT_I(sbi)->dirty_sentries == 0 && in f2fs_write_checkpoint()
1711 ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver); in f2fs_write_checkpoint()
1745 if (cpc->reason & CP_RECOVERY) in f2fs_write_checkpoint()
1750 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); in f2fs_write_checkpoint()
1752 if (cpc->reason != CP_RESIZE) in f2fs_write_checkpoint()
1753 f2fs_up_write(&sbi->cp_global_sem); in f2fs_write_checkpoint()
1762 struct inode_management *im = &sbi->im[i]; in f2fs_init_ino_entry_info() local
1764 INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC); in f2fs_init_ino_entry_info()
1765 spin_lock_init(&im->ino_lock); in f2fs_init_ino_entry_info()
1766 INIT_LIST_HEAD(&im->ino_list); in f2fs_init_ino_entry_info()
1767 im->ino_num = 0; in f2fs_init_ino_entry_info()
1770 sbi->max_orphans = (BLKS_PER_SEG(sbi) - F2FS_CP_PACKS - in f2fs_init_ino_entry_info()
1771 NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) * in f2fs_init_ino_entry_info()
1780 return -ENOMEM; in f2fs_create_checkpoint_caches()
1785 return -ENOMEM; in f2fs_create_checkpoint_caches()
1801 f2fs_down_write(&sbi->gc_lock); in __write_checkpoint_sync()
1803 f2fs_up_write(&sbi->gc_lock); in __write_checkpoint_sync()
1810 struct ckpt_req_control *cprc = &sbi->cprc_info; in __checkpoint_and_complete_reqs()
1816 dispatch_list = llist_del_all(&cprc->issue_list); in __checkpoint_and_complete_reqs()
1822 atomic_inc(&cprc->issued_ckpt); in __checkpoint_and_complete_reqs()
1825 diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time); in __checkpoint_and_complete_reqs()
1826 req->ret = ret; in __checkpoint_and_complete_reqs()
1827 req->delta_time = diff; in __checkpoint_and_complete_reqs()
1828 complete(&req->wait); in __checkpoint_and_complete_reqs()
1833 atomic_sub(count, &cprc->queued_ckpt); in __checkpoint_and_complete_reqs()
1834 atomic_add(count, &cprc->total_ckpt); in __checkpoint_and_complete_reqs()
1836 spin_lock(&cprc->stat_lock); in __checkpoint_and_complete_reqs()
1837 cprc->cur_time = (unsigned int)div64_u64(sum_diff, count); in __checkpoint_and_complete_reqs()
1838 if (cprc->peak_time < cprc->cur_time) in __checkpoint_and_complete_reqs()
1839 cprc->peak_time = cprc->cur_time; in __checkpoint_and_complete_reqs()
1840 spin_unlock(&cprc->stat_lock); in __checkpoint_and_complete_reqs()
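
__checkpoint_and_complete_reqs() (1810-1840) drains every queued request in one llist_del_all(), runs a single checkpoint on behalf of all of them, then completes each waiter with the shared result while updating the queued/total counters and the average/peak latency. A single-threaded sketch of that drain-and-complete shape (hypothetical names; the kernel uses a lock-free llist and struct completion):

#include <stdio.h>

struct ckpt_req {
	int ret;
	struct ckpt_req *next;
};

static struct ckpt_req *issue_list;	/* stands in for cprc->issue_list */

static void queue_req(struct ckpt_req *req)
{
	req->next = issue_list;		/* llist_add() in the kernel */
	issue_list = req;
}

static int do_checkpoint_once(void)
{
	return 0;			/* pretend the checkpoint succeeded */
}

/* Grab every queued request at once and serve them with one checkpoint. */
static void checkpoint_and_complete_reqs(void)
{
	struct ckpt_req *req = issue_list;	/* llist_del_all() */
	int ret, count = 0;

	issue_list = NULL;
	ret = do_checkpoint_once();

	for (; req; req = req->next, count++)
		req->ret = ret;		/* complete(&req->wait) in the kernel */

	printf("completed %d requests, ret=%d\n", count, ret);
}

int main(void)
{
	struct ckpt_req a = { 0 }, b = { 0 };

	queue_req(&a);
	queue_req(&b);
	checkpoint_and_complete_reqs();	/* completed 2 requests, ret=0 */
	return 0;
}
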
1846 struct ckpt_req_control *cprc = &sbi->cprc_info; in issue_checkpoint_thread()
1847 wait_queue_head_t *q = &cprc->ckpt_wait_queue; in issue_checkpoint_thread()
1852 if (!llist_empty(&cprc->issue_list)) in issue_checkpoint_thread()
1856 kthread_should_stop() || !llist_empty(&cprc->issue_list)); in issue_checkpoint_thread()
1863 struct ckpt_req_control *cprc = &sbi->cprc_info; in flush_remained_ckpt_reqs()
1865 if (!llist_empty(&cprc->issue_list)) { in flush_remained_ckpt_reqs()
1870 wait_for_completion(&wait_req->wait); in flush_remained_ckpt_reqs()
1878 init_completion(&req->wait); in init_ckpt_req()
1879 req->queue_time = ktime_get(); in init_ckpt_req()
1884 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_issue_checkpoint()
1890 sbi->umount_lock_holder == current) { in f2fs_issue_checkpoint()
1893 f2fs_down_write(&sbi->gc_lock); in f2fs_issue_checkpoint()
1895 f2fs_up_write(&sbi->gc_lock); in f2fs_issue_checkpoint()
1900 if (!cprc->f2fs_issue_ckpt) in f2fs_issue_checkpoint()
1905 llist_add(&req.llnode, &cprc->issue_list); in f2fs_issue_checkpoint()
1906 atomic_inc(&cprc->queued_ckpt); in f2fs_issue_checkpoint()
1915 if (waitqueue_active(&cprc->ckpt_wait_queue)) in f2fs_issue_checkpoint()
1916 wake_up(&cprc->ckpt_wait_queue); in f2fs_issue_checkpoint()
1918 if (cprc->f2fs_issue_ckpt) in f2fs_issue_checkpoint()
1925 "blocked on checkpoint for %u ms", cprc->peak_time); in f2fs_issue_checkpoint()
1934 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_start_ckpt_thread()
1935 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_start_ckpt_thread()
1937 if (cprc->f2fs_issue_ckpt) in f2fs_start_ckpt_thread()
1940 cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi, in f2fs_start_ckpt_thread()
1941 "f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev)); in f2fs_start_ckpt_thread()
1942 if (IS_ERR(cprc->f2fs_issue_ckpt)) { in f2fs_start_ckpt_thread()
1943 int err = PTR_ERR(cprc->f2fs_issue_ckpt); in f2fs_start_ckpt_thread()
1945 cprc->f2fs_issue_ckpt = NULL; in f2fs_start_ckpt_thread()
1949 set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio); in f2fs_start_ckpt_thread()
1956 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_stop_ckpt_thread()
1959 if (!cprc->f2fs_issue_ckpt) in f2fs_stop_ckpt_thread()
1962 ckpt_task = cprc->f2fs_issue_ckpt; in f2fs_stop_ckpt_thread()
1963 cprc->f2fs_issue_ckpt = NULL; in f2fs_stop_ckpt_thread()
1971 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_flush_ckpt_thread()
1976 while (atomic_read(&cprc->queued_ckpt)) in f2fs_flush_ckpt_thread()
1982 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_init_ckpt_req_control()
1984 atomic_set(&cprc->issued_ckpt, 0); in f2fs_init_ckpt_req_control()
1985 atomic_set(&cprc->total_ckpt, 0); in f2fs_init_ckpt_req_control()
1986 atomic_set(&cprc->queued_ckpt, 0); in f2fs_init_ckpt_req_control()
1987 cprc->ckpt_thread_ioprio = DEFAULT_CHECKPOINT_IOPRIO; in f2fs_init_ckpt_req_control()
1988 init_waitqueue_head(&cprc->ckpt_wait_queue); in f2fs_init_ckpt_req_control()
1989 init_llist_head(&cprc->issue_list); in f2fs_init_ckpt_req_control()
1990 spin_lock_init(&cprc->stat_lock); in f2fs_init_ckpt_req_control()