// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

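/*
 * Background GC daemon.  It sleeps for an adaptive interval and, on each
 * wakeup, decides whether a GC pass should run.  When GC_MERGE is enabled,
 * it also services foreground GC requests handed over from
 * f2fs_balance_fs() via the fggc wait queue.
 */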
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_freezable_timeout(*wq,
				kthread_should_stop() ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try once */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		gc_control.one_time = false;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note: we have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions.  So we wait a while to let
		 * dirty segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
		    sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (f2fs_sb_has_blkzoned(sbi)) {
			if (has_enough_free_blocks(sbi,
						gc_th->no_zoned_gc_percent)) {
				wait_ms = gc_th->no_gc_sleep_time;
				f2fs_up_write(&sbi->gc_lock);
				goto next;
			}
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->max_sleep_time;
		}

		if (need_to_boost_gc(sbi)) {
			decrease_sleep_time(gc_th, &wait_ms);
			if (f2fs_sb_has_blkzoned(sbi))
				gc_control.one_time = true;
		} else {
			increase_sleep_time(gc_th, &wait_ms);
		}
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
			(gc_control.one_time && gc_th->boost_gc_greedy);

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground && !f2fs_sb_has_blkzoned(sbi))
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't touch wait_ms for foreground GC */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

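/*
 * Allocate the GC thread control structure, pick sleep-time defaults
 * (zoned devices get their own set) and start the "f2fs_gc-major:minor"
 * kthread.
 */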
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;
	gc_th->boost_gc_multiple = BOOST_GC_MULTIPLE;
	gc_th->boost_gc_greedy = GC_GREEDY;

	if (f2fs_sb_has_blkzoned(sbi)) {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
		gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
		gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
	} else {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
		gc_th->no_zoned_gc_percent = 0;
		gc_th->boost_zoned_gc_percent = 0;
	}

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

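/*
 * Map the GC type and the user-selected sbi->gc_mode to a victim
 * selection algorithm: cost-benefit (GC_CB), greedy (GC_GREEDY) or
 * age-threshold (GC_AT).
 */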
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
	case GC_URGENT_LOW:
	case GC_URGENT_MID:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR || p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * Adjust the candidate range: all dirty segments should be
	 * eligible in the foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that
	 * background GC selected earlier.  Those sections are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

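/*
 * Cost-benefit cost of a section: combine utilization (u, in percent)
 * with a normalized age, roughly (100 - u) * age / (100 + u), and invert
 * the result so that a smaller return value means a better victim.
 */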
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);
	vblocks = get_valid_blocks(sbi, segno, true);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = BLKS_TO_SEGS(sbi, vblocks * 100);

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p,
			unsigned int valid_thresh_ratio)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	if (p->one_time_gc && (valid_thresh_ratio < 100) &&
			(get_valid_blocks(sbi, segno, true) >=
			CAP_BLKS_PER_SEC(sbi) * valid_thresh_ratio / 100))
		return UINT_MAX;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

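/*
 * Walk the ATGC rb-tree, which is ordered by mtime, and return the last
 * node visited on the search path, i.e. an entry whose mtime is a close
 * neighbor of the requested one.
 */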
static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

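/*
 * Record a dirty section as an ATGC candidate: refresh the global and
 * per-round mtime bounds, then insert the section into the victim
 * rb-tree unless it is younger than the configured age threshold.
 */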
static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}

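/*
 * Scan up to dirty_threshold candidates from the oldest end of the
 * rb-tree and pick the one with the lowest combined cost, where cost
 * weighs normalized age against section utilization via am->age_weight.
 */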
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around the source section, in the range
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

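/*
 * Pinned-section tracking: sections holding blocks of pinned files are
 * marked in dirty_i->pinned_secmap so that foreground GC can skip them
 * while pinning is enabled.
 */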
static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from the dirty seglist.
 */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age, bool one_time)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	unsigned int valid_thresh_ratio = 100;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;
	if (one_time) {
		p.one_time_gc = one_time;
		if (has_enough_free_secs(sbi, 0, NR_PERSISTENT_LOG))
			valid_thresh_ratio = sbi->gc_thread->valid_thresh_ratio;
	}

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) {
			ret = -EBUSY;
			goto out;
		}
		if (gc_type == FG_GC)
			clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap);
		p.min_segno = *result;
		goto got_result;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * Skip a segno that previously failed the block validity
		 * check during GC, to avoid an endless GC loop on it.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find a source section
				 * during GC.  The victim should have no
				 * checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which may be full of
				 * checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p, valid_thresh_ratio);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

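/*
 * Check whether the block at (segno, offset) is still marked valid in
 * the current SIT bitmap; GC rechecks this because a block can become
 * invalid while its pages are being fetched.
 */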
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT.  If they match, the node is valid and is migrated
 * with cold status; otherwise the invalid node is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct folio *node_folio;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_folio = f2fs_get_node_folio(sbi, nid);
		if (IS_ERR(node_folio))
			continue;

		/* block may become invalid during f2fs_get_node_folio */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_folio_put(node_folio, true);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_folio_put(node_folio, true);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_folio_put(node_folio, true);
			continue;
		}

		err = f2fs_move_node_folio(node_folio, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block.  If a node offset that points to another node block type,
 * such as an indirect or double indirect node block, is given, it is a
 * caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct folio *node_folio;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_folio = f2fs_get_node_folio(sbi, nid);
	if (IS_ERR(node_folio))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_folio_put(node_folio, true);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_folio_put(node_folio, true);
		return false;
	}

	if (IS_INODE(node_folio)) {
		base = offset_in_addr(F2FS_INODE(node_folio));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_folio_put(node_folio, true);
		return false;
	}

	*nofs = ofs_of_node(node_folio);
	source_blkaddr = data_blkaddr(NULL, node_folio, ofs_in_node);
	f2fs_folio_put(node_folio, true);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

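/*
 * Read ahead one data block into the meta inode's page cache.  This is
 * used for inodes that require meta-inode GC (e.g. encrypted files), so
 * that the later move_data_block() can copy the on-disk block directly.
 */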
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct dnode_of_data dn;
	struct folio *folio;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	int err;

	folio = f2fs_grab_cache_folio(mapping, index, true);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_folio;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_folio;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_folio;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_folio;
	}
got_it:
	/* read folio */
	fio.folio = folio;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data in the meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_folio_wait_writeback(folio, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_folio;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_folio_put(folio, true);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_folio:
	f2fs_folio_put(folio, true);
	return err;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct folio *folio, *mfolio;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	folio = f2fs_grab_cache_folio(mapping, bidx, false);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		folio_clear_uptodate(folio);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data in the meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_folio_wait_writeback(folio, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.folio = folio;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mfolio = f2fs_grab_cache_folio(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (IS_ERR(mfolio)) {
		err = PTR_ERR(mfolio);
		goto up_out;
	}

	fio.encrypted_page = folio_file_page(mfolio, fio.old_blkaddr);

	/* read source block in mfolio */
	if (!folio_test_uptodate(mfolio)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_folio_put(mfolio, true);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		folio_lock(mfolio);
		if (unlikely(!is_meta_folio(mfolio) ||
				!folio_test_uptodate(mfolio))) {
			err = -EIO;
			f2fs_folio_put(mfolio, true);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);
	if (err) {
		f2fs_folio_put(mfolio, true);
		/* filesystem should shut down, no need to recover the block */
		goto up_out;
	}

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_folio_put(mfolio, true);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				folio_address(mfolio), PAGE_SIZE);
	f2fs_folio_put(mfolio, true);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);

	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_folio_put(folio, true);
	return err;
}

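/*
 * Move a data block through the regular page cache path: for BG_GC the
 * folio is only marked dirty and tagged as under GC, while FG_GC writes
 * it out synchronously via f2fs_do_write_data_page().
 */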
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct folio *folio;
	int err = 0;

	folio = f2fs_get_lock_data_folio(inode, bidx, true);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (folio_test_writeback(folio)) {
			err = -EAGAIN;
			goto out;
		}
		folio_mark_dirty(folio);
		folio_set_f2fs_gcing(folio);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.folio = folio,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = folio_test_dirty(folio);

retry:
		f2fs_folio_wait_writeback(folio, DATA, true, true);

		folio_mark_dirty(folio);
		if (folio_clear_dirty_for_io(folio)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		folio_set_f2fs_gcing(folio);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			folio_clear_f2fs_gcing(folio);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				folio_mark_dirty(folio);
		}
	}
out:
	f2fs_folio_put(folio, true);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity.  If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Also stop GC if the segment became fully valid due to
		 * a race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* get an inode by ino, checking its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			struct folio *data_folio;
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			if (is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				iput(inode);
				continue;
			}

			if (f2fs_has_inline_data(inode)) {
				iput(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				f2fs_err_ratelimited(sbi,
					"inode %lx has both inline_data flag and "
					"data block, nid=%u, ofs_in_node=%u",
					inode->i_ino, dni.nid, ofs_in_node);
				continue;
			}

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_meta_inode_gc_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_folio = f2fs_get_read_data_folio(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_folio)) {
				iput(inode);
				continue;
			}

			f2fs_folio_put(data_folio, false);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_meta_inode_gc_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_meta_inode_gc_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

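/*
 * Pick an LFS victim for GC while holding the SIT sentry lock; a thin
 * wrapper around f2fs_get_victim().
 */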
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
					int gc_type, bool one_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
					LFS, 0, one_time);
	up_write(&sit_i->sentry_lock);
	return ret;
}

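/*
 * Garbage-collect one section (or a migration window within a large
 * section): grab references on every victim segment's summary folio,
 * then migrate node or data blocks segment by segment.  Returns the
 * number of segments that ended up with zero valid blocks.
 */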
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate, bool one_time)
{
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
	unsigned int sec_end_segno;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
	int submitted = 0;

	if (__is_large_section(sbi)) {
		sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));

		/*
		 * Zone capacity can be less than zone size on zoned devices,
		 * resulting in fewer usable segments in the zone than
		 * expected; calculate the end segno in the zone that can be
		 * garbage collected.
		 */
		if (f2fs_sb_has_blkzoned(sbi))
			sec_end_segno -= SEGS_PER_SEC(sbi) -
					f2fs_usable_segs_in_sec(sbi);

		if (gc_type == BG_GC || one_time) {
			unsigned int window_granularity =
				sbi->migration_window_granularity;

			if (f2fs_sb_has_blkzoned(sbi) &&
					!has_enough_free_blocks(sbi,
					sbi->gc_thread->boost_zoned_gc_percent))
				window_granularity *=
					sbi->gc_thread->boost_gc_multiple;

			end_segno = start_segno + window_granularity;
		}

		if (end_segno > sec_end_segno)
			end_segno = sec_end_segno;
	}

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary folios */
	while (segno < end_segno) {
		struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno++);
		if (IS_ERR(sum_folio)) {
			int err = PTR_ERR(sum_folio);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_folio = filemap_get_folio(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				folio_put_refs(sum_folio, 2);
			}
			return err;
		}
		folio_unlock(sum_folio);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {
		struct f2fs_summary_block *sum;

		/* find segment summary of victim */
		struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!folio_test_uptodate(sum_folio) ||
				unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = folio_address(sum_folio);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_gc_seg_count(sbi, data_type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < sec_end_segno) ?
					segno + 1 : NULL_SEGNO;
skip:
		folio_put_refs(sum_folio, 2);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi, data_type);

	blk_finish_plug(&plug);

	if (migrated)
		stat_inc_gc_sec_count(sbi, data_type, gc_type);

	return seg_freed;
}

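/*
 * Main GC entry point.  The caller must hold sbi->gc_lock for write; it
 * is released here before returning.  Callers describe the request with
 * a struct f2fs_gc_control, as gc_thread_func() does above, e.g.:
 *
 *	struct f2fs_gc_control gc_control = {
 *		.victim_segno = NULL_SEGNO,
 *		.init_gc_type = BG_GC,
 *		.no_bg_gc = false,
 *		.should_migrate_blocks = false,
 *		.err_gc_skipped = false,
 *		.nr_free_secs = 0 };
 *
 *	f2fs_down_write(&sbi->gc_lock);
 *	err = f2fs_gc(sbi, &gc_control);
 */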
f2fs_gc(struct f2fs_sb_info * sbi,struct f2fs_gc_control * gc_control)1858 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
1859 {
1860 int gc_type = gc_control->init_gc_type;
1861 unsigned int segno = gc_control->victim_segno;
1862 int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
1863 int ret = 0;
1864 struct cp_control cpc;
1865 struct gc_inode_list gc_list = {
1866 .ilist = LIST_HEAD_INIT(gc_list.ilist),
1867 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1868 };
1869 unsigned int skipped_round = 0, round = 0;
1870 unsigned int upper_secs;
1871
1872 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
1873 gc_control->nr_free_secs,
1874 get_pages(sbi, F2FS_DIRTY_NODES),
1875 get_pages(sbi, F2FS_DIRTY_DENTS),
1876 get_pages(sbi, F2FS_DIRTY_IMETA),
1877 free_sections(sbi),
1878 free_segments(sbi),
1879 reserved_segments(sbi),
1880 prefree_segments(sbi));
1881
1882 cpc.reason = __get_cp_reason(sbi);
1883 gc_more:
1884 sbi->skipped_gc_rwsem = 0;
1885 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1886 ret = -EINVAL;
1887 goto stop;
1888 }
1889 if (unlikely(f2fs_cp_error(sbi))) {
1890 ret = -EIO;
1891 goto stop;
1892 }
1893
1894 /* Let's run FG_GC, if we don't have enough space. */
1895 if (has_not_enough_free_secs(sbi, 0, 0)) {
1896 gc_type = FG_GC;
1897 gc_control->one_time = false;
1898
1899 /*
1900 * For example, if there are many prefree_segments below given
1901 * threshold, we can make them free by checkpoint. Then, we
1902 * secure free segments which doesn't need fggc any more.
1903 */
1904 if (prefree_segments(sbi)) {
1905 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1906 ret = f2fs_write_checkpoint(sbi, &cpc);
1907 if (ret)
1908 goto stop;
1909 /* Reset due to checkpoint */
1910 sec_freed = 0;
1911 }
1912 }
1913
1914 /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
1915 if (gc_type == BG_GC && gc_control->no_bg_gc) {
1916 ret = -EINVAL;
1917 goto stop;
1918 }
retry:
	ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
	if (ret) {
		/* allow searching victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks,
				gc_control->one_time);
	if (seg_freed < 0)
		goto stop;

	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
		sec_freed++;
		total_sec_freed++;
	}

	if (gc_control->one_time)
		goto stop;

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;

		if (has_enough_free_secs(sbi, sec_freed, 0)) {
			if (!gc_control->no_bg_gc &&
			    total_sec_freed < gc_control->nr_free_secs)
				goto go_gc_more;
			goto stop;
		}
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	} else if (has_enough_free_secs(sbi, 0, 0)) {
		goto stop;
	}

	__get_secs_required(sbi, NULL, &upper_secs, NULL);

	/*
	 * Write a checkpoint to reclaim prefree segments. Beyond upper_secs,
	 * we need three extra sections for the writer's data/node/dentry.
	 */
	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
				prefree_segments(sbi)) {
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		/* Reset due to checkpoint */
		sec_freed = 0;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = total_sec_freed ? 0 : -EAGAIN;
	return ret;
}

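/*
 * Illustrative only (not built): a minimal sketch of how a caller might
 * request foreground GC through the control structure above. f2fs_gc()
 * drops sbi->gc_lock on exit, so the caller takes it first. The helper
 * name is hypothetical; the field choices follow this file, but real
 * callers (e.g. f2fs_balance_fs()) may set them differently.
 */
#if 0
static int example_run_fg_gc(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,	/* let __get_victim() choose */
		.init_gc_type = FG_GC,
		.no_bg_gc = true,		/* don't fall back to BG_GC */
		.should_migrate_blocks = false,
		.err_gc_skipped = true,		/* -EAGAIN if nothing freed */
		.nr_free_secs = 1 };

	f2fs_down_write(&sbi->gc_lock);
	return f2fs_gc(sbi, &gc_control);	/* releases gc_lock */
}
#endif
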
int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

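/*
 * Illustrative only (not built): the create/destroy pair above is meant
 * to bracket the module's lifetime, in the style of the other f2fs slab
 * caches. The function names below are hypothetical stand-ins for the
 * real module init/exit path.
 */
#if 0
static int __init example_module_init(void)
{
	int err;

	err = f2fs_create_garbage_collection_cache();
	if (err)
		return err;
	/* ... register the filesystem, create other caches ... */
	return 0;
}

static void __exit example_module_exit(void)
{
	/* ... unregister first, then free the cache ... */
	f2fs_destroy_garbage_collection_cache();
}
#endif
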
static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
	    SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from the slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
			GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

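/*
 * Illustrative only (not built): ATGC arms itself only when the ATGC
 * mount option is set and the filesystem has aged past the default
 * threshold, so block-age data is meaningful. A hypothetical helper
 * restating the condition init_atgc_management() checks:
 */
#if 0
static bool example_atgc_usable(struct f2fs_sb_info *sbi)
{
	return test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD;
}
#endif
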
int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections)
{
	unsigned int segno;
	unsigned int gc_secs = dry_run_sections;

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno)))
			continue;

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
		put_gc_inode(&gc_list);

		if (!dry_run && get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
		if (dry_run && dry_run_sections &&
		    !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
			break;

		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
	}

	return 0;
}

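/*
 * Illustrative only (not built): with dry_run set, the loop above still
 * migrates blocks but bails out once dry_run_sections sections have
 * been emptied, and it skips the "-EAGAIN if still valid" check.
 * free_segment_range() drives the trial pass of a resize this way,
 * with a budget of zero (no early stop). The helper name and the
 * budget of two sections here are hypothetical.
 */
#if 0
static int example_trial_pass(struct f2fs_sb_info *sbi,
			unsigned int start_seg, unsigned int end_seg)
{
	/* caller holds sbi->gc_lock, as f2fs_resize_fs() does */
	return f2fs_gc_range(sbi, start_seg, end_seg, true, 2);
}
#endif
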
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool dry_run)
{
	unsigned int next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
		if (err)
			goto out;
	}

	/* do GC to move out valid blocks in the range */
	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
	if (err || dry_run)
		goto out;

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * SEGS_PER_SEC(sbi);

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
				(long long)SEGS_TO_BLKS(sbi, segs));
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
					cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}

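/*
 * Illustrative only (not built): every on-disk superblock field above
 * follows the same endian-safe read-modify-write pattern: decode with
 * leXX_to_cpu(), adjust in native byte order, re-encode with
 * cpu_to_leXX(). A hypothetical helper condensing that pattern:
 */
#if 0
static void example_bump_le32(__le32 *field, int delta)
{
	u32 val = le32_to_cpu(*field);	/* decode from little-endian */

	*field = cpu_to_le32(val + delta);	/* re-encode after update */
}
#endif
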
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * SEGS_PER_SEC(sbi);
	long long blks = SEGS_TO_BLKS(sbi, segs);
	long long user_block_count =
			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
					div_u64(blks, sbi->blocks_per_blkz);
#endif
	}
}

int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
								old_block_count)
			return -EINVAL;
	}

	/* the new fs size must be aligned to the section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
		if (err)
			return err;
		return -EROFS;
	}

	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
	return err;
}

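/*
 * Illustrative only (not built): f2fs_resize_fs() rejects target sizes
 * that are not section-aligned, so a caller would round its target down
 * first. The helper name is hypothetical; the alignment rule matches
 * the div_u64_rem() check above.
 */
#if 0
static __u64 example_align_to_section(struct f2fs_sb_info *sbi,
					__u64 wanted_blocks)
{
	__u32 rem;

	div_u64_rem(wanted_blocks, BLKS_PER_SEC(sbi), &rem);
	return wanted_blocks - rem;	/* largest aligned size <= wanted */
}
#endif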