Lines Matching +full:double +full:-phase (free-text search hits in fs/f2fs/gc.c; each hit shows its file line number, the matching source line, and the enclosing function)

1 // SPDX-License-Identifier: GPL-2.0
34 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func()
35 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func()
36 wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq; in gc_thread_func()
43 wait_ms = gc_th->min_sleep_time; in gc_thread_func()
52 gc_th->gc_wake, in gc_thread_func()
59 if (gc_th->gc_wake) in gc_thread_func()
60 gc_th->gc_wake = false; in gc_thread_func()
62 if (f2fs_readonly(sbi->sb)) { in gc_thread_func()
69 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func()
79 if (!sb_start_write_trylock(sbi->sb)) { in gc_thread_func()
99 if (sbi->gc_mode == GC_URGENT_HIGH || in gc_thread_func()
100 sbi->gc_mode == GC_URGENT_MID) { in gc_thread_func()
101 wait_ms = gc_th->urgent_sleep_time; in gc_thread_func()
102 f2fs_down_write(&sbi->gc_lock); in gc_thread_func()
107 f2fs_down_write(&sbi->gc_lock); in gc_thread_func()
109 } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) { in gc_thread_func()
116 f2fs_up_write(&sbi->gc_lock); in gc_thread_func()
123 gc_th->no_zoned_gc_percent)) { in gc_thread_func()
124 wait_ms = gc_th->no_gc_sleep_time; in gc_thread_func()
125 f2fs_up_write(&sbi->gc_lock); in gc_thread_func()
128 if (wait_ms == gc_th->no_gc_sleep_time) in gc_thread_func()
129 wait_ms = gc_th->max_sleep_time; in gc_thread_func()
144 (gc_control.one_time && gc_th->boost_gc_greedy); in gc_thread_func()
158 wait_ms = gc_th->no_gc_sleep_time; in gc_thread_func()
161 if (wait_ms == gc_th->no_gc_sleep_time) in gc_thread_func()
162 wait_ms = gc_th->min_sleep_time; in gc_thread_func()
166 wake_up_all(&gc_th->fggc_wq); in gc_thread_func()
168 trace_f2fs_background_gc(sbi->sb, wait_ms, in gc_thread_func()
174 if (sbi->gc_mode != GC_NORMAL) { in gc_thread_func()
175 spin_lock(&sbi->gc_remaining_trials_lock); in gc_thread_func()
176 if (sbi->gc_remaining_trials) { in gc_thread_func()
177 sbi->gc_remaining_trials--; in gc_thread_func()
178 if (!sbi->gc_remaining_trials) in gc_thread_func()
179 sbi->gc_mode = GC_NORMAL; in gc_thread_func()
181 spin_unlock(&sbi->gc_remaining_trials_lock); in gc_thread_func()
183 sb_end_write(sbi->sb); in gc_thread_func()
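The gc_thread_func() loop above tunes its own wake-up interval: it pins to urgent_sleep_time under GC_URGENT_HIGH/MID, parks at no_gc_sleep_time (and then max_sleep_time) when there is nothing worth collecting, and restarts from min_sleep_time once victims reappear. A minimal sketch of that backoff policy, assuming hypothetical names (the kernel's own helpers of this flavor live in fs/f2fs/gc.h):

#include <stdbool.h>

struct gc_tuning {
	unsigned int urgent_ms, min_ms, max_ms, no_gc_ms;
};

/* Hypothetical re-statement of the sleep tuning visible above. */
static unsigned int next_wait_ms(const struct gc_tuning *t,
				 unsigned int wait_ms,
				 bool urgent, bool idle, bool found_victim)
{
	if (urgent)			/* GC_URGENT_HIGH / GC_URGENT_MID */
		return t->urgent_ms;
	if (idle)			/* nothing worth collecting */
		return t->no_gc_ms;
	if (found_victim)		/* speed up; waking from idle restarts low */
		return (wait_ms == t->no_gc_ms || wait_ms < 2 * t->min_ms)
			? t->min_ms : wait_ms - t->min_ms;
	/* no victim this round: back off toward the maximum */
	return (wait_ms + t->min_ms > t->max_ms) ? t->max_ms
						 : wait_ms + t->min_ms;
}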
192 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_start_gc_thread()
196 return -ENOMEM; in f2fs_start_gc_thread()
198 gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME; in f2fs_start_gc_thread()
199 gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO; in f2fs_start_gc_thread()
200 gc_th->boost_gc_multiple = BOOST_GC_MULTIPLE; in f2fs_start_gc_thread()
201 gc_th->boost_gc_greedy = GC_GREEDY; in f2fs_start_gc_thread()
204 gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED; in f2fs_start_gc_thread()
205 gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED; in f2fs_start_gc_thread()
206 gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED; in f2fs_start_gc_thread()
207 gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC; in f2fs_start_gc_thread()
208 gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC; in f2fs_start_gc_thread()
210 gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME; in f2fs_start_gc_thread()
211 gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME; in f2fs_start_gc_thread()
212 gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME; in f2fs_start_gc_thread()
213 gc_th->no_zoned_gc_percent = 0; in f2fs_start_gc_thread()
214 gc_th->boost_zoned_gc_percent = 0; in f2fs_start_gc_thread()
217 gc_th->gc_wake = false; in f2fs_start_gc_thread()
219 sbi->gc_thread = gc_th; in f2fs_start_gc_thread()
220 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); in f2fs_start_gc_thread()
221 init_waitqueue_head(&sbi->gc_thread->fggc_wq); in f2fs_start_gc_thread()
222 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, in f2fs_start_gc_thread()
223 "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev)); in f2fs_start_gc_thread()
224 if (IS_ERR(gc_th->f2fs_gc_task)) { in f2fs_start_gc_thread()
225 int err = PTR_ERR(gc_th->f2fs_gc_task); in f2fs_start_gc_thread()
228 sbi->gc_thread = NULL; in f2fs_start_gc_thread()
237 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in f2fs_stop_gc_thread()
241 kthread_stop(gc_th->f2fs_gc_task); in f2fs_stop_gc_thread()
242 wake_up_all(&gc_th->fggc_wq); in f2fs_stop_gc_thread()
244 sbi->gc_thread = NULL; in f2fs_stop_gc_thread()
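f2fs_start_gc_thread()/f2fs_stop_gc_thread() above follow the stock kthread lifecycle: allocate state, init the wait queues, kthread_run() a loop that sleeps in wait_event_interruptible_timeout() until woken or told to quit, then kthread_stop() joins it (f2fs additionally wakes fggc_wq so foreground waiters are not stranded). A stripped-down sketch of that pattern; the kthread/wait APIs are real, the demo_* names are hypothetical:

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/err.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static struct task_struct *demo_task;
static bool demo_wake;

static int demo_thread_func(void *data)
{
	do {
		/* sleep until woken, asked to stop, or the timeout expires;
		 * kthread_stop() wakes the task, so this never strands it */
		wait_event_interruptible_timeout(demo_wq,
				kthread_should_stop() || demo_wake,
				msecs_to_jiffies(30000));
		demo_wake = false;
		/* ... one round of background work ... */
	} while (!kthread_should_stop());
	return 0;
}

static int demo_start(void)
{
	demo_task = kthread_run(demo_thread_func, NULL, "demo_gc");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void demo_stop(void)
{
	kthread_stop(demo_task);	/* blocks until the thread returns */
}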
252 if (sbi->am.atgc_enabled) in select_gc_type()
260 switch (sbi->gc_mode) { in select_gc_type()
283 if (p->alloc_mode == SSR || p->alloc_mode == AT_SSR) { in select_policy()
284 p->gc_mode = GC_GREEDY; in select_policy()
285 p->dirty_bitmap = dirty_i->dirty_segmap[type]; in select_policy()
286 p->max_search = dirty_i->nr_dirty[type]; in select_policy()
287 p->ofs_unit = 1; in select_policy()
289 p->gc_mode = select_gc_type(sbi, gc_type); in select_policy()
290 p->ofs_unit = SEGS_PER_SEC(sbi); in select_policy()
292 p->dirty_bitmap = dirty_i->dirty_secmap; in select_policy()
293 p->max_search = count_bits(p->dirty_bitmap, in select_policy()
296 p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY]; in select_policy()
297 p->max_search = dirty_i->nr_dirty[DIRTY]; in select_policy()
306 (sbi->gc_mode != GC_URGENT_HIGH) && in select_policy()
307 (p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) && in select_policy()
308 p->max_search > sbi->max_victim_search) in select_policy()
309 p->max_search = sbi->max_victim_search; in select_policy()
313 p->offset = get_random_u32_below(MAIN_SECS(sbi) * in select_policy()
316 p->offset = 0; in select_policy()
318 p->offset = SIT_I(sbi)->last_victim[p->gc_mode]; in select_policy()
325 if (p->alloc_mode == SSR) in get_max_cost()
327 else if (p->alloc_mode == AT_SSR) in get_max_cost()
331 if (p->gc_mode == GC_GREEDY) in get_max_cost()
332 return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit); in get_max_cost()
333 else if (p->gc_mode == GC_CB) in get_max_cost()
335 else if (p->gc_mode == GC_AT) in get_max_cost()
351 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { in check_bg_victims()
354 clear_bit(secno, dirty_i->victim_secmap); in check_bg_victims()
377 if (mtime < sit_i->min_mtime) in get_cb_cost()
378 sit_i->min_mtime = mtime; in get_cb_cost()
379 if (mtime > sit_i->max_mtime) in get_cb_cost()
380 sit_i->max_mtime = mtime; in get_cb_cost()
381 if (sit_i->max_mtime != sit_i->min_mtime) in get_cb_cost()
382 age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime), in get_cb_cost()
383 sit_i->max_mtime - sit_i->min_mtime); in get_cb_cost()
385 return UINT_MAX - ((100 * (100 - u) * age) / (100 + u)); in get_cb_cost()
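get_cb_cost() above is the classic cost-benefit victim score: with utilization u = 100 * valid / total (percent) and age normalized into [0, 100] from the min/max mtimes tracked just before, the benefit-per-cost term is (100 - u) * age / (100 + u), where the denominator roughly models reading the whole section plus rewriting its still-valid u. Subtracting from UINT_MAX turns "bigger benefit" into "smaller cost", so the minimum-cost search prefers cold, mostly-invalid sections. A standalone worked example of the same arithmetic:

#include <stdio.h>
#include <limits.h>

/* Standalone copy of the scoring in get_cb_cost():
 * u   - utilization in percent (100 * valid_blocks / section_blocks)
 * age - 0 (newest, mtime == max) .. 100 (oldest, mtime == min)
 * Lower return value == better victim. */
static unsigned int cb_cost(unsigned int u, unsigned int age)
{
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

int main(void)
{
	/* a cold, mostly-invalid section beats a hot, mostly-valid one */
	printf("u=20%% age=80 -> benefit %u\n",
	       UINT_MAX - cb_cost(20, 80));	/* 100*80*80/120 = 5333 */
	printf("u=90%% age=10 -> benefit %u\n",
	       UINT_MAX - cb_cost(90, 10));	/* 100*10*10/190 = 52 */
	return 0;
}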
392 if (p->alloc_mode == SSR) in get_gc_cost()
393 return get_seg_entry(sbi, segno)->ckpt_valid_blocks; in get_gc_cost()
395 if (p->one_time_gc && (valid_thresh_ratio < 100) && in get_gc_cost()
401 if (p->gc_mode == GC_GREEDY) in get_gc_cost()
403 else if (p->gc_mode == GC_CB) in get_gc_cost()
437 if (cur_ve->mtime > next_ve->mtime) { in f2fs_check_victim_tree()
440 cur_ve->mtime, next_ve->mtime); in f2fs_check_victim_tree()
452 struct atgc_management *am = &sbi->am; in __lookup_victim_entry()
453 struct rb_node *node = am->root.rb_root.rb_node; in __lookup_victim_entry()
459 if (mtime < ve->mtime) in __lookup_victim_entry()
460 node = node->rb_left; in __lookup_victim_entry()
462 node = node->rb_right; in __lookup_victim_entry()
470 struct atgc_management *am = &sbi->am; in __create_victim_entry()
475 ve->mtime = mtime; in __create_victim_entry()
476 ve->segno = segno; in __create_victim_entry()
478 list_add_tail(&ve->list, &am->victim_list); in __create_victim_entry()
479 am->victim_count++; in __create_victim_entry()
487 struct atgc_management *am = &sbi->am; in __insert_victim_entry()
488 struct rb_root_cached *root = &am->root; in __insert_victim_entry()
489 struct rb_node **p = &root->rb_root.rb_node; in __insert_victim_entry()
499 if (mtime < ve->mtime) { in __insert_victim_entry()
500 p = &(*p)->rb_left; in __insert_victim_entry()
502 p = &(*p)->rb_right; in __insert_victim_entry()
509 rb_link_node(&ve->rb_node, parent, p); in __insert_victim_entry()
510 rb_insert_color_cached(&ve->rb_node, root, left_most); in __insert_victim_entry()
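__lookup_victim_entry()/__insert_victim_entry() above keep the ATGC candidates in an mtime-ordered rb_root_cached, so the oldest entry is reachable in O(1) through the cached leftmost node (rb_first_cached()) and age-neighbour walks can use rb_prev()/rb_next(). The pattern reduced to a hedged minimum; the rbtree API is the real kernel one, the demo_entry type is hypothetical:

#include <linux/rbtree.h>

struct demo_entry {
	struct rb_node rb_node;
	unsigned long long key;		/* mtime in the f2fs case */
};

static void demo_insert(struct rb_root_cached *root, struct demo_entry *new)
{
	struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct demo_entry *e = rb_entry(*p, struct demo_entry, rb_node);

		parent = *p;
		if (new->key < e->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* no longer the smallest key */
		}
	}
	rb_link_node(&new->rb_node, parent, p);
	/* keeps root->rb_leftmost valid, so rb_first_cached() stays O(1) */
	rb_insert_color_cached(&new->rb_node, root, leftmost);
}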
520 if (p->gc_mode == GC_AT && in add_victim_entry()
529 if (mtime < sit_i->min_mtime) in add_victim_entry()
530 sit_i->min_mtime = mtime; in add_victim_entry()
531 if (mtime > sit_i->max_mtime) in add_victim_entry()
532 sit_i->max_mtime = mtime; in add_victim_entry()
533 if (mtime < sit_i->dirty_min_mtime) in add_victim_entry()
534 sit_i->dirty_min_mtime = mtime; in add_victim_entry()
535 if (mtime > sit_i->dirty_max_mtime) in add_victim_entry()
536 sit_i->dirty_max_mtime = mtime; in add_victim_entry()
539 if (sit_i->dirty_max_mtime - mtime < p->age_threshold) in add_victim_entry()
549 struct atgc_management *am = &sbi->am; in atgc_lookup_victim()
550 struct rb_root_cached *root = &am->root; in atgc_lookup_victim()
555 unsigned long long max_mtime = sit_i->dirty_max_mtime; in atgc_lookup_victim()
556 unsigned long long min_mtime = sit_i->dirty_min_mtime; in atgc_lookup_victim()
559 unsigned int dirty_threshold = max(am->max_candidate_count, in atgc_lookup_victim()
560 am->candidate_ratio * in atgc_lookup_victim()
561 am->victim_count / 100); in atgc_lookup_victim()
562 unsigned int age_weight = am->age_weight; in atgc_lookup_victim()
570 total_time = max_mtime - min_mtime; in atgc_lookup_victim()
582 if (ve->mtime >= max_mtime || ve->mtime < min_mtime) in atgc_lookup_victim()
586 age = div64_u64(accu * (max_mtime - ve->mtime), total_time) * in atgc_lookup_victim()
589 vblocks = get_valid_blocks(sbi, ve->segno, true); in atgc_lookup_victim()
593 u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) * in atgc_lookup_victim()
594 (100 - age_weight); in atgc_lookup_victim()
598 cost = UINT_MAX - (age + u); in atgc_lookup_victim()
601 if (cost < p->min_cost || in atgc_lookup_victim()
602 (cost == p->min_cost && age > p->oldest_age)) { in atgc_lookup_victim()
603 p->min_cost = cost; in atgc_lookup_victim()
604 p->oldest_age = age; in atgc_lookup_victim()
605 p->min_segno = ve->segno; in atgc_lookup_victim()
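atgc_lookup_victim() above blends its two signals on one scale: the normalized age (max_mtime - mtime over the dirty mtime span) and the invalid-block ratio are both multiplied by the common accuracy factor accu and then weighted age_weight : (100 - age_weight), 60 : 40 with the in-tree defaults; cost = UINT_MAX - (age + u) again makes the best candidate the minimum, with greater age breaking ties. atssr_lookup_victim() below reuses the same tree differently: it looks up the entry nearest a target age and walks rb_prev()/rb_next() neighbours, scoring by checkpointed valid blocks alone. A hypothetical standalone form of the ATGC score:

#include <stdint.h>
#include <limits.h>

static unsigned int atgc_cost(uint64_t accu, uint64_t mtime,
			      uint64_t min_mtime, uint64_t max_mtime,
			      unsigned int valid_blocks, unsigned int sec_blocks,
			      unsigned int age_weight)
{
	uint64_t total_time = max_mtime - min_mtime;
	uint64_t age = accu * (max_mtime - mtime) / total_time * age_weight;
	uint64_t u = accu * (sec_blocks - valid_blocks) / sec_blocks
						* (100 - age_weight);

	/* accu is chosen so age + u stays below UINT_MAX;
	 * minimum cost == oldest and emptiest */
	return UINT_MAX - (unsigned int)(age + u);
}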
616 * [target - dirty_threshold, target + dirty_threshold]
622 struct atgc_management *am = &sbi->am; in atssr_lookup_victim()
625 unsigned long long max_mtime = sit_i->dirty_max_mtime; in atssr_lookup_victim()
626 unsigned long long min_mtime = sit_i->dirty_min_mtime; in atssr_lookup_victim()
628 unsigned int dirty_threshold = max(am->max_candidate_count, in atssr_lookup_victim()
629 am->candidate_ratio * in atssr_lookup_victim()
630 am->victim_count / 100); in atssr_lookup_victim()
639 ve = __lookup_victim_entry(sbi, p->age); in atssr_lookup_victim()
647 if (ve->mtime >= max_mtime || ve->mtime < min_mtime) in atssr_lookup_victim()
650 age = max_mtime - ve->mtime; in atssr_lookup_victim()
652 vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks; in atssr_lookup_victim()
661 age = max_mtime - abs(p->age - age); in atssr_lookup_victim()
662 cost = UINT_MAX - vblocks; in atssr_lookup_victim()
664 if (cost < p->min_cost || in atssr_lookup_victim()
665 (cost == p->min_cost && age > p->oldest_age)) { in atssr_lookup_victim()
666 p->min_cost = cost; in atssr_lookup_victim()
667 p->oldest_age = age; in atssr_lookup_victim()
668 p->min_segno = ve->segno; in atssr_lookup_victim()
672 ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) : in atssr_lookup_victim()
673 rb_next(&ve->rb_node), in atssr_lookup_victim()
685 f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root)); in lookup_victim_by_age()
687 if (p->gc_mode == GC_AT) in lookup_victim_by_age()
689 else if (p->alloc_mode == AT_SSR) in lookup_victim_by_age()
697 struct atgc_management *am = &sbi->am; in release_victim_entry()
700 list_for_each_entry_safe(ve, tmp, &am->victim_list, list) { in release_victim_entry()
701 list_del(&ve->list); in release_victim_entry()
703 am->victim_count--; in release_victim_entry()
706 am->root = RB_ROOT_CACHED; in release_victim_entry()
708 f2fs_bug_on(sbi, am->victim_count); in release_victim_entry()
709 f2fs_bug_on(sbi, !list_empty(&am->victim_list)); in release_victim_entry()
717 if (!dirty_i->enable_pin_section) in f2fs_pin_section()
719 if (!test_and_set_bit(secno, dirty_i->pinned_secmap)) in f2fs_pin_section()
720 dirty_i->pinned_secmap_cnt++; in f2fs_pin_section()
726 return dirty_i->pinned_secmap_cnt; in f2fs_pinned_section_exists()
732 return dirty_i->enable_pin_section && in f2fs_section_is_pinned()
734 test_bit(secno, dirty_i->pinned_secmap); in f2fs_section_is_pinned()
742 memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size); in f2fs_unpin_all_sections()
743 DIRTY_I(sbi)->pinned_secmap_cnt = 0; in f2fs_unpin_all_sections()
745 DIRTY_I(sbi)->enable_pin_section = enable; in f2fs_unpin_all_sections()
754 return -EBUSY; in f2fs_gc_pinned_control()
757 return -EAGAIN; in f2fs_gc_pinned_control()
782 mutex_lock(&dirty_i->seglist_lock); in f2fs_get_victim()
787 p.age_threshold = sbi->am.age_threshold; in f2fs_get_victim()
791 valid_thresh_ratio = sbi->gc_thread->valid_thresh_ratio; in f2fs_get_victim()
804 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX; in f2fs_get_victim()
808 ret = -ENODATA; in f2fs_get_victim()
813 ret = -EBUSY; in f2fs_get_victim()
817 clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap); in f2fs_get_victim()
822 ret = -ENODATA; in f2fs_get_victim()
827 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) { in f2fs_get_victim()
828 p.min_segno = sbi->next_victim_seg[BG_GC]; in f2fs_get_victim()
830 sbi->next_victim_seg[BG_GC] = NULL_SEGNO; in f2fs_get_victim()
834 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) { in f2fs_get_victim()
835 p.min_segno = sbi->next_victim_seg[FG_GC]; in f2fs_get_victim()
837 sbi->next_victim_seg[FG_GC] = NULL_SEGNO; in f2fs_get_victim()
842 last_victim = sm->last_victim[p.gc_mode]; in f2fs_get_victim()
859 if (sm->last_victim[p.gc_mode]) { in f2fs_get_victim()
861 sm->last_victim[p.gc_mode]; in f2fs_get_victim()
862 sm->last_victim[p.gc_mode] = 0; in f2fs_get_victim()
878 if (test_bit(segno, sm->invalid_segmap)) in f2fs_get_victim()
907 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) in f2fs_get_victim()
926 if (!sm->last_victim[p.gc_mode] && segno <= last_victim) in f2fs_get_victim()
927 sm->last_victim[p.gc_mode] = in f2fs_get_victim()
930 sm->last_victim[p.gc_mode] = segno + p.ofs_unit; in f2fs_get_victim()
931 sm->last_victim[p.gc_mode] %= in f2fs_get_victim()
944 sm->elapsed_time < p.age_threshold) { in f2fs_get_victim()
956 sbi->cur_victim_sec = secno; in f2fs_get_victim()
958 set_bit(secno, dirty_i->victim_secmap); in f2fs_get_victim()
965 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, in f2fs_get_victim()
966 sbi->cur_victim_sec, in f2fs_get_victim()
968 mutex_unlock(&dirty_i->seglist_lock); in f2fs_get_victim()
977 ie = radix_tree_lookup(&gc_list->iroot, ino); in find_gc_inode()
979 return ie->inode; in find_gc_inode()
987 if (inode == find_gc_inode(gc_list, inode->i_ino)) { in add_gc_inode()
993 new_ie->inode = inode; in add_gc_inode()
995 f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie); in add_gc_inode()
996 list_add_tail(&new_ie->list, &gc_list->ilist); in add_gc_inode()
1003 list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) { in put_gc_inode()
1004 radix_tree_delete(&gc_list->iroot, ie->inode->i_ino); in put_gc_inode()
1005 iput(ie->inode); in put_gc_inode()
1006 list_del(&ie->list); in put_gc_inode()
1018 down_read(&sit_i->sentry_lock); in check_valid_map()
1020 ret = f2fs_test_bit(offset, sentry->cur_valid_map); in check_valid_map()
1021 up_read(&sit_i->sentry_lock); in check_valid_map()
1036 int phase = 0; in gc_node_segment() local
1046 if (fggc && phase == 2) in gc_node_segment()
1047 atomic_inc(&sbi->wb_sync_req[NODE]); in gc_node_segment()
1050 nid_t nid = le32_to_cpu(entry->nid); in gc_node_segment()
1062 if (phase == 0) { in gc_node_segment()
1068 if (phase == 1) { in gc_node_segment()
1073 /* phase == 2 */ in gc_node_segment()
1100 if (++phase < 3) in gc_node_segment()
1104 atomic_dec(&sbi->wb_sync_req[NODE]); in gc_node_segment()
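gc_node_segment() above sweeps the victim's summary entries three times ("if (++phase < 3)"), and gc_data_segment() further down does the same with five phases: roughly, the early passes only issue metadata and page readahead, and only the final pass does the blocking migration work, so the I/O queue is kept full before anything waits on it. A generic, self-contained reduction of the pattern to two phases, with hypothetical prefetch/process stand-ins:

#include <stdio.h>

struct entry { int id; };

/* stand-ins for the real readahead and migration steps */
static void prefetch_entry(struct entry *e) { printf("readahead %d\n", e->id); }
static void process_entry(struct entry *e)  { printf("migrate %d\n", e->id); }

/* issue every prefetch first, then do the blocking per-entry work
 * against a (hopefully) warm cache */
static void gc_segment_sketch(struct entry *entries, int nr)
{
	for (int phase = 0; phase < 2; phase++) {
		for (int i = 0; i < nr; i++) {
			if (phase == 0)
				prefetch_entry(&entries[i]);
			else
				process_entry(&entries[i]);
		}
	}
}

int main(void)
{
	struct entry e[3] = { {0}, {1}, {2} };

	gc_segment_sketch(e, 3);
	return 0;
}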
1112 * as indirect or double indirect node blocks, are given, it must be a caller's
1124 bidx = node_ofs - 1; in f2fs_start_bidx_of_node()
1126 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1); in f2fs_start_bidx_of_node()
1128 bidx = node_ofs - 2 - dec; in f2fs_start_bidx_of_node()
1130 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1); in f2fs_start_bidx_of_node()
1132 bidx = node_ofs - 5 - dec; in f2fs_start_bidx_of_node()
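The arithmetic in f2fs_start_bidx_of_node() above maps a node block's offset inside an inode's node tree to the index of the first data block it addresses: offset 0 is the inode itself, offsets 1-2 are the two direct node blocks, then come the two indirect trees and the double-indirect tree, and the "dec" terms subtract the indirect node blocks interleaved among the direct ones. A standalone copy of that arithmetic, assuming NIDS_PER_BLOCK = 1018 (the usual 4 KiB node block; check node.h for your build) and omitting the final scaling by ADDRS_PER_BLOCK/ADDRS_PER_INODE that the real function applies:

#include <stdio.h>

#define NIDS_PER_BLOCK 1018	/* assumed; 4 KiB node block */

static unsigned int start_bidx(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;

	if (node_ofs == 0)		/* the inode itself */
		return 0;
	if (node_ofs <= 2)		/* the two direct node blocks */
		return node_ofs - 1;
	if (node_ofs <= indirect_blks)	/* under the two indirect trees */
		return node_ofs - 2 - (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
	/* under the double-indirect tree */
	return node_ofs - 5 - (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
}

int main(void)
{
	/* ofs 3 is the first indirect node; ofs 4 is its first child,
	 * i.e. the 3rd direct node overall -> bidx 2 */
	printf("%u %u %u\n", start_bidx(1), start_bidx(2), start_bidx(4));
	return 0;	/* prints: 0 1 2 */
}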
1145 nid = le32_to_cpu(sum->nid); in is_alive()
1146 ofs_in_node = le16_to_cpu(sum->ofs_in_node); in is_alive()
1157 if (sum->version != dni->version) { in is_alive()
1163 if (f2fs_check_nid_range(sbi, dni->ino)) { in is_alive()
1178 base, ofs_in_node, max_addrs, dni->ino, dni->nid); in is_alive()
1193 if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) { in is_alive()
1209 F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping; in ra_data_block()
1214 .ino = inode->i_ino, in ra_data_block()
1232 err = -EFSCORRUPTED; in ra_data_block()
1245 err = -ENOENT; in ra_data_block()
1250 err = -EFSCORRUPTED; in ra_data_block()
1270 err = -ENOMEM; in ra_data_block()
1299 F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping; in move_data_block()
1302 .ino = inode->i_ino, in move_data_block()
1317 int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) && in move_data_block()
1318 (fio.sbi->gc_mode != GC_URGENT_HIGH) ? in move_data_block()
1327 err = -ENOENT; in move_data_block()
1342 err = -ENOENT; in move_data_block()
1363 f2fs_down_write(&fio.sbi->io_order_lock); in move_data_block()
1390 err = -EIO; in move_data_block()
1410 err = -ENOMEM; in move_data_block()
1446 f2fs_up_write(&fio.sbi->io_order_lock); in move_data_block()
1465 err = -ENOENT; in move_data_page()
1475 err = -EAGAIN; in move_data_page()
1483 .ino = inode->i_ino, in move_data_page()
1510 if (err == -ENOMEM) { in move_data_page()
1534 struct super_block *sb = sbi->sb; in gc_data_segment()
1538 int phase = 0; in gc_data_segment() local
1552 nid_t nid = le32_to_cpu(entry->nid); in gc_data_segment()
1567 if (phase == 0) { in gc_data_segment()
1573 if (phase == 1) { in gc_data_segment()
1582 if (phase == 2) { in gc_data_segment()
1587 ofs_in_node = le16_to_cpu(entry->ofs_in_node); in gc_data_segment()
1589 if (phase == 3) { in gc_data_segment()
1598 special_file(inode->i_mode)) { in gc_data_segment()
1609 inode->i_ino, dni.nid, ofs_in_node); in gc_data_segment()
1614 if (err == -EAGAIN) { in gc_data_segment()
1620 &F2FS_I(inode)->i_gc_rwsem[WRITE])) { in gc_data_segment()
1622 sbi->skipped_gc_rwsem++; in gc_data_segment()
1632 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in gc_data_segment()
1643 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in gc_data_segment()
1654 /* phase 4 */ in gc_data_segment()
1661 if (S_ISREG(inode->i_mode)) { in gc_data_segment()
1662 if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) { in gc_data_segment()
1663 sbi->skipped_gc_rwsem++; in gc_data_segment()
1667 &fi->i_gc_rwsem[READ])) { in gc_data_segment()
1668 sbi->skipped_gc_rwsem++; in gc_data_segment()
1669 f2fs_up_write(&fi->i_gc_rwsem[WRITE]); in gc_data_segment()
1692 f2fs_up_write(&fi->i_gc_rwsem[READ]); in gc_data_segment()
1693 f2fs_up_write(&fi->i_gc_rwsem[WRITE]); in gc_data_segment()
1700 if (++phase < 5) in gc_data_segment()
1712 down_write(&sit_i->sentry_lock); in __get_victim()
1715 up_write(&sit_i->sentry_lock); in __get_victim()
1729 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ? in do_garbage_collect()
1738 * zone-capacity can be less than zone-size in zoned devices, in do_garbage_collect()
1744 sec_end_segno -= SEGS_PER_SEC(sbi) - in do_garbage_collect()
1749 sbi->migration_window_granularity; in do_garbage_collect()
1753 sbi->gc_thread->boost_zoned_gc_percent)) in do_garbage_collect()
1755 sbi->gc_thread->boost_gc_multiple; in do_garbage_collect()
1764 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type); in do_garbage_collect()
1769 end_segno - segno, META_SSA, true); in do_garbage_collect()
1777 end_segno = segno - 1; in do_garbage_collect()
1807 migrated >= sbi->migration_granularity) in do_garbage_collect()
1814 if (type != GET_SUM_TYPE((&sum->footer))) { in do_garbage_collect()
1816 segno, type, GET_SUM_TYPE((&sum->footer))); in do_garbage_collect()
1824 * - lock_page(sum_page) - f2fs_replace_block in do_garbage_collect()
1825 * - check_valid_map() - down_write(sentry_lock) in do_garbage_collect()
1826 * - down_read(sentry_lock) - change_curseg() in do_garbage_collect()
1827 * - lock_page(sum_page) in do_garbage_collect()
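The comment above documents a classic ABBA hazard: GC would take the summary page lock and then sentry_lock, while f2fs_replace_block() takes sentry_lock and then the summary page lock. do_garbage_collect() defuses it by dropping the page lock before the migration work. A hypothetical userspace illustration of the same discipline (pthread mutexes standing in for the page lock and sentry_lock):

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* sum_page */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* sentry_lock */

static void path_gc(void)
{
	pthread_mutex_lock(&lock_a);
	/* ... copy what is needed out of the summary ... */
	pthread_mutex_unlock(&lock_a);	/* drop A before ever taking B */
	pthread_mutex_lock(&lock_b);
	pthread_mutex_unlock(&lock_b);
}

static void path_replace(void)
{
	pthread_mutex_lock(&lock_b);
	pthread_mutex_lock(&lock_a);	/* safe only because GC dropped A */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
}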
1830 submitted += gc_node_segment(sbi, sum->entries, segno, in do_garbage_collect()
1833 submitted += gc_data_segment(sbi, sum->entries, gc_list, in do_garbage_collect()
1838 sbi->gc_reclaimed_segs[sbi->gc_mode]++; in do_garbage_collect()
1847 sbi->next_victim_seg[gc_type] = in do_garbage_collect()
1867 int gc_type = gc_control->init_gc_type; in f2fs_gc()
1868 unsigned int segno = gc_control->victim_segno; in f2fs_gc()
1879 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc, in f2fs_gc()
1880 gc_control->nr_free_secs, in f2fs_gc()
1891 sbi->skipped_gc_rwsem = 0; in f2fs_gc()
1892 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) { in f2fs_gc()
1893 ret = -EINVAL; in f2fs_gc()
1897 ret = -EIO; in f2fs_gc()
1904 gc_control->one_time = false; in f2fs_gc()
1922 if (gc_type == BG_GC && gc_control->no_bg_gc) { in f2fs_gc()
1923 ret = -EINVAL; in f2fs_gc()
1927 ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time); in f2fs_gc()
1930 if (ret == -ENODATA && gc_type == FG_GC && in f2fs_gc()
1939 gc_control->should_migrate_blocks, in f2fs_gc()
1940 gc_control->one_time); in f2fs_gc()
1951 if (gc_control->one_time) in f2fs_gc()
1955 sbi->cur_victim_sec = NULL_SEGNO; in f2fs_gc()
1958 if (!gc_control->no_bg_gc && in f2fs_gc()
1959 total_sec_freed < gc_control->nr_free_secs) in f2fs_gc()
1963 if (sbi->skipped_gc_rwsem) in f2fs_gc()
1996 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0; in f2fs_gc()
1997 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno; in f2fs_gc()
2002 trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed, in f2fs_gc()
2011 f2fs_up_write(&sbi->gc_lock); in f2fs_gc()
2015 if (gc_control->err_gc_skipped && !ret) in f2fs_gc()
2016 ret = total_sec_freed ? 0 : -EAGAIN; in f2fs_gc()
2024 return victim_entry_slab ? 0 : -ENOMEM; in f2fs_create_garbage_collection_cache()
2034 struct atgc_management *am = &sbi->am; in init_atgc_management()
2037 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD) in init_atgc_management()
2038 am->atgc_enabled = true; in init_atgc_management()
2040 am->root = RB_ROOT_CACHED; in init_atgc_management()
2041 INIT_LIST_HEAD(&am->victim_list); in init_atgc_management()
2042 am->victim_count = 0; in init_atgc_management()
2044 am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO; in init_atgc_management()
2045 am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT; in init_atgc_management()
2046 am->age_weight = DEF_GC_THREAD_AGE_WEIGHT; in init_atgc_management()
2047 am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD; in init_atgc_management()
2052 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; in f2fs_build_gc_manager()
2056 SIT_I(sbi)->last_victim[ALLOC_NEXT] = in f2fs_build_gc_manager()
2070 return -EIO; in f2fs_gc_range()
2092 return -EAGAIN; in f2fs_gc_range()
2094 !get_valid_blocks(sbi, segno, true) && --gc_secs == 0) in f2fs_gc_range()
2098 return -ERESTARTSYS; in f2fs_gc_range()
2114 MAIN_SECS(sbi) -= secs; in free_segment_range()
2116 end = MAIN_SEGS(sbi) - 1; in free_segment_range()
2118 mutex_lock(&DIRTY_I(sbi)->seglist_lock); in free_segment_range()
2120 if (SIT_I(sbi)->last_victim[gc_mode] >= start) in free_segment_range()
2121 SIT_I(sbi)->last_victim[gc_mode] = 0; in free_segment_range()
2124 if (sbi->next_victim_seg[gc_type] >= start) in free_segment_range()
2125 sbi->next_victim_seg[gc_type] = NULL_SEGNO; in free_segment_range()
2126 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); in free_segment_range()
2165 f2fs_down_write(&sbi->sb_lock); in update_sb_metadata()
2167 section_count = le32_to_cpu(raw_sb->section_count); in update_sb_metadata()
2168 segment_count = le32_to_cpu(raw_sb->segment_count); in update_sb_metadata()
2169 segment_count_main = le32_to_cpu(raw_sb->segment_count_main); in update_sb_metadata()
2170 block_count = le64_to_cpu(raw_sb->block_count); in update_sb_metadata()
2172 raw_sb->section_count = cpu_to_le32(section_count + secs); in update_sb_metadata()
2173 raw_sb->segment_count = cpu_to_le32(segment_count + segs); in update_sb_metadata()
2174 raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs); in update_sb_metadata()
2175 raw_sb->block_count = cpu_to_le64(block_count + in update_sb_metadata()
2178 int last_dev = sbi->s_ndevs - 1; in update_sb_metadata()
2180 le32_to_cpu(raw_sb->devs[last_dev].total_segments); in update_sb_metadata()
2182 raw_sb->devs[last_dev].total_segments = in update_sb_metadata()
2186 f2fs_up_write(&sbi->sb_lock); in update_sb_metadata()
2194 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count); in update_fs_metadata()
2196 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; in update_fs_metadata()
2199 if (sbi->allocate_section_hint > MAIN_SECS(sbi)) in update_fs_metadata()
2200 sbi->allocate_section_hint = MAIN_SECS(sbi); in update_fs_metadata()
2201 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; in update_fs_metadata()
2202 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; in update_fs_metadata()
2203 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks); in update_fs_metadata()
2206 int last_dev = sbi->s_ndevs - 1; in update_fs_metadata()
2208 sbi->allocate_section_hint = FDEV(0).total_segments / in update_fs_metadata()
2217 div_u64(blks, sbi->blocks_per_blkz); in update_fs_metadata()
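update_sb_metadata()/update_fs_metadata() above apply one resize delta in three units at once: secs sections, segs = secs * SEGS_PER_SEC(sbi) segments, and blks = SEGS_TO_BLKS(sbi, segs) blocks (plus blks / blocks_per_blkz zones on zoned devices); f2fs_resize_fs() passes -secs when shrinking, as the calls further down show. With the common geometry assumed here of one segment per section and 512 x 4 KiB blocks per segment (the usual 2 MiB f2fs segment), the conversion is plain arithmetic:

#include <stdio.h>

#define SEGS_PER_SEC 1		/* assumed; the real code reads the sb */
#define BLKS_PER_SEG 512

int main(void)
{
	long long secs = -8;	/* shrink by 8 sections */
	long long segs = secs * SEGS_PER_SEC;
	long long blks = segs * BLKS_PER_SEG;

	printf("sections %+lld, segments %+lld, blocks %+lld (%+lld MiB)\n",
	       secs, segs, blks, blks * 4 / 1024);
	return 0;	/* sections -8, segments -8, blocks -4096 (-16 MiB) */
}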
2231 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count); in f2fs_resize_fs()
2233 return -EINVAL; in f2fs_resize_fs()
2236 int last_dev = sbi->s_ndevs - 1; in f2fs_resize_fs()
2241 return -EINVAL; in f2fs_resize_fs()
2247 return -EINVAL; in f2fs_resize_fs()
2254 return -EFSCORRUPTED; in f2fs_resize_fs()
2259 return -EINVAL; in f2fs_resize_fs()
2266 shrunk_blocks = old_block_count - block_count; in f2fs_resize_fs()
2270 if (!f2fs_down_write_trylock(&sbi->gc_lock)) { in f2fs_resize_fs()
2271 err = -EAGAIN; in f2fs_resize_fs()
2278 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2280 sbi->current_reserved_blocks + sbi->unusable_block_count + in f2fs_resize_fs()
2281 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) in f2fs_resize_fs()
2282 err = -ENOSPC; in f2fs_resize_fs()
2283 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2292 f2fs_up_write(&sbi->gc_lock); in f2fs_resize_fs()
2298 err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL); in f2fs_resize_fs()
2302 if (f2fs_readonly(sbi->sb)) { in f2fs_resize_fs()
2303 err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL); in f2fs_resize_fs()
2306 return -EROFS; in f2fs_resize_fs()
2309 f2fs_down_write(&sbi->gc_lock); in f2fs_resize_fs()
2310 f2fs_down_write(&sbi->cp_global_sem); in f2fs_resize_fs()
2312 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2314 sbi->current_reserved_blocks + sbi->unusable_block_count + in f2fs_resize_fs()
2315 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) in f2fs_resize_fs()
2316 err = -ENOSPC; in f2fs_resize_fs()
2318 sbi->user_block_count -= shrunk_blocks; in f2fs_resize_fs()
2319 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2328 update_sb_metadata(sbi, -secs); in f2fs_resize_fs()
2336 update_fs_metadata(sbi, -secs); in f2fs_resize_fs()
2353 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2354 sbi->user_block_count += shrunk_blocks; in f2fs_resize_fs()
2355 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2358 f2fs_up_write(&sbi->cp_global_sem); in f2fs_resize_fs()
2359 f2fs_up_write(&sbi->gc_lock); in f2fs_resize_fs()
2360 thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL); in f2fs_resize_fs()