// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false,
		.one_time = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false, gc_boost = false;

		wait_event_freezable_timeout(*wq,
				kthread_should_stop() ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) {
			foreground = true;
			gc_control.one_time = false;
		} else if (f2fs_sb_has_blkzoned(sbi)) {
			gc_control.one_time = true;
		}

		/* gc_wake is a one-shot trigger; clear it after this attempt */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note: we have to avoid triggering GC too frequently, because
		 * some segments may be invalidated soon afterwards by user
		 * updates or deletions, so it is better to wait a while and
		 * let dirty segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
			goto do_gc;
		} else if (!f2fs_down_write_trylock_trace(&sbi->gc_lock,
							&gc_control.lc)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write_trace(&sbi->gc_lock, &gc_control.lc);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (f2fs_sb_has_blkzoned(sbi)) {
			if (has_enough_free_blocks(sbi,
				gc_th->no_zoned_gc_percent)) {
				wait_ms = gc_th->no_gc_sleep_time;
				f2fs_up_write_trace(&sbi->gc_lock,
							&gc_control.lc);
				goto next;
			}
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->max_sleep_time;
		}

		if (need_to_boost_gc(sbi)) {
			decrease_sleep_time(gc_th, &wait_ms);
			if (f2fs_sb_has_blkzoned(sbi))
				gc_boost = true;
		} else {
			increase_sleep_time(gc_th, &wait_ms);
		}
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
			(gc_boost && gc_th->boost_gc_greedy);

		/* foreground GC has been triggered via f2fs_balance_fs() */
		if (foreground && !f2fs_sb_has_blkzoned(sbi))
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't let foreground GC disturb wait_ms */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
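
/*
 * A rough sketch of the sleep-time adaptation above, assuming the default
 * non-zoned tunables from gc.h (30s min, 60s max, 5min no-GC, 500ms urgent;
 * the exact values are an assumption, check gc.h in your tree):
 *
 *  - when the device is busy, increase_sleep_time() stretches wait_ms
 *    toward max_sleep_time;
 *  - when need_to_boost_gc() holds, decrease_sleep_time() shrinks it
 *    toward min_sleep_time;
 *  - a round where f2fs_gc() finds no victim parks the thread at
 *    no_gc_sleep_time;
 *  - GC_URGENT_HIGH/MID pin wait_ms at urgent_sleep_time until the mode
 *    is cleared.
 */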

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;
	gc_th->boost_gc_multiple = BOOST_GC_MULTIPLE;
	gc_th->boost_gc_greedy = GC_GREEDY;

	if (f2fs_sb_has_blkzoned(sbi)) {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
		gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
		gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
	} else {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
		gc_th->no_zoned_gc_percent = 0;
		gc_th->boost_zoned_gc_percent = 0;
	}

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	set_user_nice(gc_th->f2fs_gc_task,
			PRIO_TO_NICE(sbi->critical_task_priority));
	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
	case GC_URGENT_LOW:
	case GC_URGENT_MID:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR || p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust the candidate range; all dirty segments should be selectable
	 * for the foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* start from the beginning, so the hot/small space is selected first */
	if (f2fs_need_rand_seg(sbi)) {
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
		SIT_I(sbi)->last_victim[p->gc_mode] = p->offset;
	} else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in segment units */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
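	/*
	 * The value returned below seeds p->min_cost, i.e. it is an upper
	 * bound a candidate's cost must beat. For GC_GREEDY the cost is the
	 * section's valid-block count, which is always less than twice the
	 * per-section block count, so any dirty section can win (a reading
	 * of the code, not a documented contract).
	 */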
	if (p->gc_mode == GC_GREEDY)
		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC before.
	 * Those sections are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);
	vblocks = get_valid_blocks(sbi, segno, true);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = BLKS_TO_SEGS(sbi, vblocks * 100);

	/* Handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

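	/*
	 * Cost-benefit formula: cost = UINT_MAX - (100 * (100 - u) * age) /
	 * (100 + u), where u is utilization (%) and age is normalized to
	 * 0..100. Worked example (illustrative numbers only): a section that
	 * is 20% valid (u = 20) and maximally old (age = 100) yields a
	 * benefit of 100 * 80 * 100 / 120 = 6666, i.e. cost = UINT_MAX -
	 * 6666, while a brand-new section (age = 0) costs UINT_MAX and is
	 * never preferred.
	 */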
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p,
			unsigned int valid_thresh_ratio)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	if (p->one_time_gc && (valid_thresh_ratio < 100) &&
			(get_valid_blocks(sbi, segno, true) >=
			CAP_BLKS_PER_SEC(sbi) * valid_thresh_ratio / 100))
		return UINT_MAX;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);

	/* Handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}

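/*
 * ATGC victim cost model, as implemented below: both age and inverse
 * utilization are scaled into the same fixed-point range via 'accu', then
 * combined as age * age_weight + free_ratio * (100 - age_weight), and the
 * sum is subtracted from UINT_MAX so that a lower cost means a better
 * victim. With the 60/40 weighting suggested by the in-line comments, an
 * old, mostly-empty section wins over a young or mostly-full one.
 */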
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
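
	/*
	 * Two-stage walk (as the rb_prev()/rb_next() calls below suggest):
	 * stage 0 scans from the target age toward older entries, stage 1
	 * rescans toward younger ones, each stage visiting at most
	 * dirty_threshold candidates.
	 */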
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

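/*
 * Return convention (derived from the callers, not a documented API):
 * 0 means the inode is not pinned and GC may proceed; -EBUSY tells BG_GC
 * to leave pinned data alone; -EAGAIN means FG_GC pinned the section (or
 * unpinned the file after repeated failures) and the caller should skip
 * the block for now.
 */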
static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age, bool one_time)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p = {0};
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	unsigned int valid_thresh_ratio = 100;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;
	if (one_time) {
		p.one_time_gc = one_time;
		if (has_enough_free_secs(sbi, 0, NR_PERSISTENT_LOG))
			valid_thresh_ratio = sbi->gc_thread->valid_thresh_ratio;
	}

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) {
			ret = -EBUSY;
			goto out;
		}
		if (gc_type == FG_GC)
			clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap);
		p.min_segno = *result;
		goto got_result;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

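	/*
	 * Sweep the dirty bitmap in ofs_unit-sized steps, starting at
	 * p.offset; when the scan runs off the end it wraps to segment 0
	 * and resumes up to the remembered last_victim position, so
	 * repeated calls rotate through the whole space rather than
	 * re-scoring the same front every time.
	 */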
	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip an invalid segno (i.e. one that failed the block
		 * validity check during GC) to avoid an endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find a source section during
				 * GC. The victim should have no checkpointed
				 * data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which can be filled by
				 * both checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}

			if (!get_valid_blocks(sbi, segno, true))
				goto next;
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p, valid_thresh_ratio);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If they match, the node is valid and is migrated with
 * cold status; otherwise the invalid node is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type,
		struct blk_plug *plug)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

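	/*
	 * The loop below makes three passes over the segment's summary
	 * entries, advancing 'phase' each time: phase 0 reads ahead the NAT
	 * blocks, phase 1 reads ahead the node pages themselves, and phase 2
	 * re-validates each block and actually migrates it. Batching the
	 * readahead this way keeps the final, random-access phase mostly
	 * cache-hot.
	 */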
next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct folio *node_folio;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
		if (IS_ERR(node_folio))
			continue;

		/* block may become invalid during f2fs_get_node_folio */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_folio_put(node_folio, true);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_folio_put(node_folio, true);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_folio_put(node_folio, true);
			continue;
		}

		err = f2fs_move_node_folio(node_folio, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3) {
		blk_finish_plug(plug);
		blk_start_plug(plug);
		goto next_step;
	}

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index indicated by the given node offset.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block only. Passing an offset that points to another node block
 * type, such as an indirect or double indirect node block, is a caller's
 * bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
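	/*
	 * Worked example (assuming the common layout of 923 data slots in
	 * the inode and 1018 per direct node block): node_ofs 1, the first
	 * direct node, gives bidx 0 and returns block index 923; node_ofs 2
	 * gives 923 + 1018 = 1941; node_ofs 4, the first direct node under
	 * the first indirect node, gives bidx 2 and returns 2959.
	 */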
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct folio *node_folio;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
	if (IS_ERR(node_folio))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_folio_put(node_folio, true);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_folio_put(node_folio, true);
		return false;
	}

	if (IS_INODE(node_folio)) {
		base = offset_in_addr(F2FS_INODE(node_folio));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_folio_put(node_folio, true);
		return false;
	}

	*nofs = ofs_of_node(node_folio);
	source_blkaddr = data_blkaddr(NULL, node_folio, ofs_in_node);
	f2fs_folio_put(node_folio, true);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
	struct dnode_of_data dn;
	struct folio *folio, *efolio;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	int err = 0;

	folio = f2fs_grab_cache_folio(mapping, index, true);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_folio;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_folio;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_folio;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_folio;
	}
got_it:
	/* read folio */
	fio.folio = folio;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_folio_wait_writeback(folio, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	efolio = f2fs_filemap_get_folio(META_MAPPING(sbi), dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (IS_ERR(efolio)) {
		err = PTR_ERR(efolio);
		goto put_folio;
	}

	fio.encrypted_page = &efolio->page;

	if (folio_test_uptodate(efolio))
		goto put_encrypted_page;

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, false);
	f2fs_folio_put(folio, true);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, true);
put_folio:
	f2fs_folio_put(folio, true);
	return err;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct folio *folio, *mfolio, *efolio;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

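	/*
	 * Rough pipeline of this function, as the code below lays it out:
	 * grab and lock the data folio (without reading it), read the source
	 * block into the meta mapping, allocate a new block address, copy
	 * the payload into a meta folio for the new address, and write it
	 * out synchronously before updating the dnode to point at newaddr.
	 */
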
	/* do not read out */
	folio = f2fs_grab_cache_folio(mapping, bidx, false);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		folio_clear_uptodate(folio);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_folio_wait_writeback(folio, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.folio = folio;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mfolio = f2fs_grab_cache_folio(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (IS_ERR(mfolio)) {
		err = PTR_ERR(mfolio);
		goto up_out;
	}

	fio.encrypted_page = folio_file_page(mfolio, fio.old_blkaddr);

	/* read source block in mfolio */
	if (!folio_test_uptodate(mfolio)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_folio_put(mfolio, true);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		folio_lock(mfolio);
		if (unlikely(!is_meta_folio(mfolio) ||
			     !folio_test_uptodate(mfolio))) {
			err = -EIO;
			f2fs_folio_put(mfolio, true);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
				&sum, type, NULL);
	if (err) {
		f2fs_folio_put(mfolio, true);
		/* filesystem should shut down, no need to recover the block */
		goto up_out;
	}

	efolio = f2fs_filemap_get_folio(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (IS_ERR(efolio)) {
		err = PTR_ERR(efolio);
		f2fs_folio_put(mfolio, true);
		goto recover_block;
	}

	fio.encrypted_page = &efolio->page;

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				folio_address(mfolio), PAGE_SIZE);
	f2fs_folio_put(mfolio, true);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);

	f2fs_put_page(fio.encrypted_page, true);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	if (!folio_test_uptodate(folio))
		__folio_set_dropbehind(folio);
	folio_unlock(folio);
	folio_end_dropbehind(folio);
	folio_put(folio);
	return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct folio *folio;
	int err = 0;

	folio = f2fs_get_lock_data_folio(inode, bidx, true);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (folio_test_writeback(folio)) {
			err = -EAGAIN;
			goto out;
		}
		folio_mark_dirty(folio);
		folio_set_f2fs_gcing(folio);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.folio = folio,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = folio_test_dirty(folio);

retry:
		f2fs_folio_wait_writeback(folio, DATA, true, true);

		folio_mark_dirty(folio);
		if (folio_clear_dirty_for_io(folio)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		folio_set_f2fs_gcing(folio);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			folio_clear_f2fs_gcing(folio);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				folio_mark_dirty(folio);
		}
	}
out:
	f2fs_folio_put(folio, true);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is modified.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate, struct blk_plug *plug)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

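	/*
	 * Five phases, analogous to gc_node_segment() above: phase 0 reads
	 * ahead the NAT blocks, phase 1 the dnode pages, phase 2 the owning
	 * inodes' node pages, phase 3 grabs inode references and reads ahead
	 * the data pages, and phase 4 performs the actual moves under the
	 * per-inode i_gc_rwsem.
	 */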
1568 
1569 next_step:
1570 	entry = sum;
1571 
1572 	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1573 		struct inode *inode;
1574 		struct node_info dni; /* dnode info for the data */
1575 		unsigned int ofs_in_node, nofs;
1576 		block_t start_bidx;
1577 		nid_t nid = le32_to_cpu(entry->nid);
1578 
1579 		/*
1580 		 * stop BG_GC if there is not enough free sections.
1581 		 * Or, stop GC if the segment becomes fully valid caused by
1582 		 * race condition along with SSR block allocation.
1583 		 */
1584 		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1585 			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
1586 							CAP_BLKS_PER_SEC(sbi)))
1587 			return submitted;
1588 
1589 		if (check_valid_map(sbi, segno, off) == 0)
1590 			continue;
1591 
1592 		if (phase == 0) {
1593 			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1594 							META_NAT, true);
1595 			continue;
1596 		}
1597 
1598 		if (phase == 1) {
1599 			f2fs_ra_node_page(sbi, nid);
1600 			continue;
1601 		}
1602 
1603 		/* Get an inode by ino with checking validity */
1604 		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1605 			continue;
1606 
1607 		if (phase == 2) {
1608 			f2fs_ra_node_page(sbi, dni.ino);
1609 			continue;
1610 		}
1611 
1612 		ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1613 
1614 		if (phase == 3) {
1615 			struct folio *data_folio;
1616 			int err;
1617 
1618 			inode = f2fs_iget(sb, dni.ino);
1619 			if (IS_ERR(inode))
1620 				continue;
1621 
1622 			if (is_bad_inode(inode) ||
1623 					special_file(inode->i_mode)) {
1624 				iput(inode);
1625 				continue;
1626 			}
1627 
1628 			if (f2fs_has_inline_data(inode)) {
1629 				iput(inode);
1630 				set_sbi_flag(sbi, SBI_NEED_FSCK);
1631 				f2fs_err_ratelimited(sbi,
1632 					"inode %llu has both inline_data flag and "
1633 					"data block, nid=%u, ofs_in_node=%u",
1634 					inode->i_ino, dni.nid, ofs_in_node);
1635 				continue;
1636 			}
1637 
1638 			err = f2fs_gc_pinned_control(inode, gc_type, segno);
1639 			if (err == -EAGAIN) {
1640 				iput(inode);
1641 				return submitted;
1642 			}
1643 
1644 			if (!f2fs_down_write_trylock(
1645 				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1646 				iput(inode);
1647 				sbi->skipped_gc_rwsem++;
1648 				continue;
1649 			}
1650 
1651 			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1652 								ofs_in_node;
1653 
1654 			if (f2fs_meta_inode_gc_required(inode)) {
1655 				int err = ra_data_block(inode, start_bidx);
1656 
1657 				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1658 				if (err) {
1659 					iput(inode);
1660 					continue;
1661 				}
1662 				add_gc_inode(gc_list, inode);
1663 				continue;
1664 			}
1665 
1666 			data_folio = f2fs_get_read_data_folio(inode, start_bidx,
1667 							REQ_RAHEAD, true, NULL);
1668 			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1669 			if (IS_ERR(data_folio)) {
1670 				iput(inode);
1671 				continue;
1672 			}
1673 
1674 			f2fs_folio_put(data_folio, false);
1675 			add_gc_inode(gc_list, inode);
1676 			continue;
1677 		}
1678 
1679 		/* phase 4 */
1680 		inode = find_gc_inode(gc_list, dni.ino);
1681 		if (inode) {
1682 			struct f2fs_inode_info *fi = F2FS_I(inode);
1683 			bool locked = false;
1684 			int err;
1685 
1686 			if (S_ISREG(inode->i_mode)) {
1687 				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
1688 					sbi->skipped_gc_rwsem++;
1689 					continue;
1690 				}
1691 				if (!f2fs_down_write_trylock(
1692 						&fi->i_gc_rwsem[READ])) {
1693 					sbi->skipped_gc_rwsem++;
1694 					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1695 					continue;
1696 				}
1697 				locked = true;
1698 
1699 				/* wait for all inflight aio data */
1700 				inode_dio_wait(inode);
1701 			}
1702 
1703 			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
1704 								+ ofs_in_node;
1705 			if (f2fs_meta_inode_gc_required(inode))
1706 				err = move_data_block(inode, start_bidx,
1707 							gc_type, segno, off);
1708 			else
1709 				err = move_data_page(inode, start_bidx, gc_type,
1710 								segno, off);
1711 
1712 			if (!err && (gc_type == FG_GC ||
1713 					f2fs_meta_inode_gc_required(inode)))
1714 				submitted++;
1715 
1716 			if (locked) {
1717 				f2fs_up_write(&fi->i_gc_rwsem[READ]);
1718 				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1719 			}
1720 
1721 			stat_inc_data_blk_count(sbi, 1, gc_type);
1722 		}
1723 	}
1724 
1725 	if (++phase < 5) {
1726 		blk_finish_plug(plug);
1727 		blk_start_plug(plug);
1728 		goto next_step;
1729 	}
1730 
1731 	return submitted;
1732 }
1733 
1734 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1735 			int gc_type, bool one_time)
1736 {
1737 	struct sit_info *sit_i = SIT_I(sbi);
1738 	int ret;
1739 
1740 	down_write(&sit_i->sentry_lock);
1741 	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
1742 			LFS, 0, one_time);
1743 	up_write(&sit_i->sentry_lock);
1744 	return ret;
1745 }
1746 
1747 static int do_garbage_collect(struct f2fs_sb_info *sbi,
1748 				unsigned int start_segno,
1749 				struct gc_inode_list *gc_list, int gc_type,
1750 				bool force_migrate, bool one_time)
1751 {
1752 	struct blk_plug plug;
1753 	unsigned int segno = start_segno;
1754 	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
1755 	unsigned int sec_end_segno;
1756 	int seg_freed = 0, migrated = 0;
1757 	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1758 						SUM_TYPE_DATA : SUM_TYPE_NODE;
1759 	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
1760 	int submitted = 0, sum_blk_cnt;
1761 
1762 	if (__is_large_section(sbi)) {
1763 		sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
1764 
1765 		/*
1766 		 * zone-capacity can be less than zone-size in zoned devices,
1767 		 * resulting in less than expected usable segments in the zone,
1768 		 * calculate the end segno in the zone which can be garbage
1769 		 * collected
1770 		 */
1771 		if (f2fs_sb_has_blkzoned(sbi))
1772 			sec_end_segno -= SEGS_PER_SEC(sbi) -
1773 					f2fs_usable_segs_in_sec(sbi);
1774 
1775 		if (gc_type == BG_GC || one_time) {
1776 			unsigned int window_granularity =
1777 				sbi->migration_window_granularity;
1778 
1779 			if (f2fs_sb_has_blkzoned(sbi) &&
1780 					!has_enough_free_blocks(sbi,
1781 					sbi->gc_thread->boost_zoned_gc_percent))
1782 				window_granularity *=
1783 					sbi->gc_thread->boost_gc_multiple;
1784 
1785 			end_segno = start_segno + window_granularity;
1786 		}
1787 
1788 		if (end_segno > sec_end_segno)
1789 			end_segno = sec_end_segno;
1790 	}
1791 
1792 	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1793 
1794 	segno = rounddown(segno, sbi->sums_per_block);
1795 	sum_blk_cnt = DIV_ROUND_UP(end_segno - segno, sbi->sums_per_block);
1796 	/* readahead multi ssa blocks those have contiguous address */
1797 	if (__is_large_section(sbi))
1798 		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1799 					sum_blk_cnt, META_SSA, true);
1800 
1801 	/* reference all summary page */
1802 	while (segno < end_segno) {
1803 		struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno);
1804 
1805 		segno += sbi->sums_per_block;
1806 		if (IS_ERR(sum_folio)) {
1807 			int err = PTR_ERR(sum_folio);
1808 
1809 			end_segno = segno - sbi->sums_per_block;
1810 			segno = rounddown(start_segno, sbi->sums_per_block);
1811 			while (segno < end_segno) {
1812 				sum_folio = filemap_get_folio(META_MAPPING(sbi),
1813 						GET_SUM_BLOCK(sbi, segno));
1814 				folio_put_refs(sum_folio, 2);
1815 				segno += sbi->sums_per_block;
1816 			}
1817 			return err;
1818 		}
1819 		folio_unlock(sum_folio);
1820 	}
1821 
1822 	blk_start_plug(&plug);
1823 
1824 	segno = start_segno;
1825 	while (segno < end_segno) {
1826 		unsigned int cur_segno;
1827 
1828 		/* find segment summary of victim */
1829 		struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi),
1830 					GET_SUM_BLOCK(sbi, segno));
1831 		unsigned int block_end_segno = rounddown(segno, sbi->sums_per_block)
1832 					+ sbi->sums_per_block;
1833 
1834 		if (block_end_segno > end_segno)
1835 			block_end_segno = end_segno;
1836 
1837 		if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno))) {
1838 			f2fs_err(sbi, "%s: segment %u is used by log",
1839 							__func__, segno);
1840 			f2fs_bug_on(sbi, 1);
1841 			goto next_block;
1842 		}
1843 
1844 		if (!folio_test_uptodate(sum_folio) ||
1845 		    unlikely(f2fs_cp_error(sbi)))
1846 			goto next_block;
1847 
1848 		for (cur_segno = segno; cur_segno < block_end_segno;
1849 				cur_segno++) {
1850 			struct f2fs_summary_block *sum;
1851 
1852 			if (get_valid_blocks(sbi, cur_segno, false) == 0)
1853 				goto freed;
1854 			if (gc_type == BG_GC && __is_large_section(sbi) &&
1855 					migrated >= sbi->migration_granularity)
1856 				continue;
1857 
1858 			sum = SUM_BLK_PAGE_ADDR(sbi, sum_folio, cur_segno);
1859 			if (type != GET_SUM_TYPE(sum_footer(sbi, sum))) {
1860 				f2fs_err(sbi, "Inconsistent segment (%u) type "
1861 						"[%d, %d] in SSA and SIT",
1862 						cur_segno, type,
1863 						GET_SUM_TYPE(
1864 						sum_footer(sbi, sum)));
1865 				f2fs_stop_checkpoint(sbi, false,
1866 						STOP_CP_REASON_CORRUPTED_SUMMARY);
1867 				continue;
1868 			}
1869 
1870 			/*
1871 			 * this is to avoid deadlock:
1872 			 *  - lock_page(sum_page)     - f2fs_replace_block
1873 			 *   - check_valid_map()        - down_write(sentry_lock)
1874 			 *    - down_read(sentry_lock) - change_curseg()
1875 			 *                               - lock_page(sum_page)
1876 			 */
1877 			if (type == SUM_TYPE_NODE)
1878 				submitted += gc_node_segment(sbi, sum->entries,
1879 						cur_segno, gc_type, &plug);
1880 			else
1881 				submitted += gc_data_segment(sbi, sum->entries,
1882 						gc_list, cur_segno,
1883 						gc_type, force_migrate, &plug);
1884 
1885 			stat_inc_gc_seg_count(sbi, data_type, gc_type);
1886 			sbi->gc_reclaimed_segs[sbi->gc_mode]++;
1887 			migrated++;
1888 
1889 freed:
1890 			if (gc_type == FG_GC &&
1891 					get_valid_blocks(sbi, cur_segno, false) == 0)
1892 				seg_freed++;
1893 
1894 			if (__is_large_section(sbi))
1895 				sbi->next_victim_seg[gc_type] =
1896 					(cur_segno + 1 < sec_end_segno) ?
1897 					cur_segno + 1 : NULL_SEGNO;
1898 
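			/* bail out if a freeze (suspend/hibernate) was requested */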
1899 			if (unlikely(freezing(current))) {
1900 				folio_put_refs(sum_folio, 2);
1901 				goto stop;
1902 			}
1903 		}
1904 next_block:
1905 		folio_put_refs(sum_folio, 2);
1906 		segno = block_end_segno;
1907 	}
1908 
1909 stop:
1910 	if (submitted)
1911 		f2fs_submit_merged_write(sbi, data_type);
1912 
1913 	blk_finish_plug(&plug);
1914 
1915 	if (migrated)
1916 		stat_inc_gc_sec_count(sbi, data_type, gc_type);
1917 
1918 	return seg_freed;
1919 }
1920 
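/*
 * Entry point for both foreground and background GC. Called with
 * sbi->gc_lock held for write; the lock is released before returning.
 */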
1921 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
1922 {
1923 	int gc_type = gc_control->init_gc_type;
1924 	unsigned int segno = gc_control->victim_segno;
1925 	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
1926 	int ret = 0;
1927 	struct cp_control cpc;
1928 	struct gc_inode_list gc_list = {
1929 		.ilist = LIST_HEAD_INIT(gc_list.ilist),
1930 		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1931 	};
1932 	unsigned int skipped_round = 0, round = 0;
1933 	unsigned int upper_secs;
1934 
1935 	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
1936 				gc_control->nr_free_secs,
1937 				get_pages(sbi, F2FS_DIRTY_NODES),
1938 				get_pages(sbi, F2FS_DIRTY_DENTS),
1939 				get_pages(sbi, F2FS_DIRTY_IMETA),
1940 				free_sections(sbi),
1941 				free_segments(sbi),
1942 				reserved_segments(sbi),
1943 				prefree_segments(sbi));
1944 
1945 	cpc.reason = __get_cp_reason(sbi);
1946 gc_more:
1947 	sbi->skipped_gc_rwsem = 0;
1948 	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1949 		ret = -EINVAL;
1950 		goto stop;
1951 	}
1952 	if (unlikely(f2fs_cp_error(sbi))) {
1953 		ret = -EIO;
1954 		goto stop;
1955 	}
1956 
1957 	/* Let's run FG_GC if we don't have enough space. */
1958 	if (has_not_enough_free_secs(sbi, 0, 0)) {
1959 		gc_type = FG_GC;
1960 		gc_control->one_time = false;
1961 
1962 		/*
1963 		 * For example, if there are many prefree_segments below the
1964 		 * given threshold, we can free them by writing a checkpoint.
1965 		 * Then we secure free segments, which no longer need FG_GC.
1966 		 */
1967 		if (prefree_segments(sbi)) {
1968 			stat_inc_cp_call_count(sbi, TOTAL_CALL);
1969 			ret = f2fs_write_checkpoint(sbi, &cpc);
1970 			if (ret)
1971 				goto stop;
1972 			/* Reset due to checkpoint */
1973 			sec_freed = 0;
1974 		}
1975 	}
1976 
1977 	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
1978 	if (gc_type == BG_GC && gc_control->no_bg_gc) {
1979 		ret = -EINVAL;
1980 		goto stop;
1981 	}
1982 retry:
1983 	if (unlikely(freezing(current))) {
1984 		ret = 0;
1985 		goto stop;
1986 	}
1987 	ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
1988 	if (ret) {
1989 		/* allow searching for a victim in sections that have pinned data */
1990 		if (ret == -ENODATA && gc_type == FG_GC &&
1991 				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1992 			f2fs_unpin_all_sections(sbi, false);
1993 			goto retry;
1994 		}
1995 		goto stop;
1996 	}
1997 
1998 	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
1999 				gc_control->should_migrate_blocks,
2000 				gc_control->one_time);
2001 	if (seg_freed < 0)
2002 		goto stop;
2003 
2004 	total_freed += seg_freed;
2005 
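	/* freeing all usable segments means a whole section was reclaimed */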
2006 	if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
2007 		sec_freed++;
2008 		total_sec_freed++;
2009 	}
2010 
2011 	if (gc_control->one_time)
2012 		goto stop;
2013 
2014 	if (gc_type == FG_GC) {
2015 		sbi->cur_victim_sec = NULL_SEGNO;
2016 
2017 		if (has_enough_free_secs(sbi, sec_freed, 0)) {
2018 			if (!gc_control->no_bg_gc &&
2019 			    total_sec_freed < gc_control->nr_free_secs)
2020 				goto go_gc_more;
2021 			goto stop;
2022 		}
2023 		if (sbi->skipped_gc_rwsem)
2024 			skipped_round++;
2025 		round++;
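		/*
		 * If more than half of the rounds so far were skipped due to
		 * gc_rwsem contention, write a checkpoint instead of retrying
		 * GC indefinitely.
		 */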
2026 		if (skipped_round > MAX_SKIP_GC_COUNT &&
2027 				skipped_round * 2 >= round) {
2028 			stat_inc_cp_call_count(sbi, TOTAL_CALL);
2029 			ret = f2fs_write_checkpoint(sbi, &cpc);
2030 			goto stop;
2031 		}
2032 	} else if (has_enough_free_secs(sbi, 0, 0)) {
2033 		goto stop;
2034 	}
2035 
2036 	upper_secs = __get_secs_required(sbi);
2037 
2038 	/*
2039 	 * Write a checkpoint to reclaim prefree segments.
2040 	 * We need three extra sections for the writer's data/node/dentry.
2041 	 */
2042 	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
2043 				prefree_segments(sbi)) {
2044 		stat_inc_cp_call_count(sbi, TOTAL_CALL);
2045 		ret = f2fs_write_checkpoint(sbi, &cpc);
2046 		if (ret)
2047 			goto stop;
2048 		/* Reset due to checkpoint */
2049 		sec_freed = 0;
2050 	}
2051 go_gc_more:
2052 	segno = NULL_SEGNO;
2053 	goto gc_more;
2054 
2055 stop:
2056 	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
2057 	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
2058 
2059 	if (gc_type == FG_GC)
2060 		f2fs_unpin_all_sections(sbi, true);
2061 
2062 	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
2063 				get_pages(sbi, F2FS_DIRTY_NODES),
2064 				get_pages(sbi, F2FS_DIRTY_DENTS),
2065 				get_pages(sbi, F2FS_DIRTY_IMETA),
2066 				free_sections(sbi),
2067 				free_segments(sbi),
2068 				reserved_segments(sbi),
2069 				prefree_segments(sbi));
2070 
2071 	f2fs_up_write_trace(&sbi->gc_lock, &gc_control->lc);
2072 
2073 	put_gc_inode(&gc_list);
2074 
2075 	if (gc_control->err_gc_skipped && !ret)
2076 		ret = total_sec_freed ? 0 : -EAGAIN;
2077 	return ret;
2078 }
2079 
2080 int __init f2fs_create_garbage_collection_cache(void)
2081 {
2082 	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
2083 					sizeof(struct victim_entry));
2084 	return victim_entry_slab ? 0 : -ENOMEM;
2085 }
2086 
2087 void f2fs_destroy_garbage_collection_cache(void)
2088 {
2089 	kmem_cache_destroy(victim_entry_slab);
2090 }
2091 
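/* initialize the age-threshold GC (ATGC) victim selection state */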
2092 static void init_atgc_management(struct f2fs_sb_info *sbi)
2093 {
2094 	struct atgc_management *am = &sbi->am;
2095 
2096 	if (test_opt(sbi, ATGC) &&
2097 		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
2098 		am->atgc_enabled = true;
2099 
2100 	am->root = RB_ROOT_CACHED;
2101 	INIT_LIST_HEAD(&am->victim_list);
2102 	am->victim_count = 0;
2103 
2104 	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
2105 	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
2106 	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
2107 	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
2108 }
2109 
2110 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
2111 {
2112 	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
2113 
2114 	/* serve the warm/cold data area from the slower device */
2115 	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
2116 		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
2117 				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
2118 
2119 	init_atgc_management(sbi);
2120 }
2121 
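/*
 * Force-GC every non-empty, non-current section in [start_seg, end_seg].
 * In dry-run mode, stop once dry_run_sections sections have been emptied.
 */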
2122 int f2fs_gc_range(struct f2fs_sb_info *sbi,
2123 		unsigned int start_seg, unsigned int end_seg,
2124 		bool dry_run, unsigned int dry_run_sections)
2125 {
2126 	unsigned int segno;
2127 	unsigned int gc_secs = dry_run_sections;
2128 
2129 	if (unlikely(f2fs_cp_error(sbi)))
2130 		return -EIO;
2131 
2132 	stat_inc_gc_call_count(sbi, FOREGROUND);
2133 	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
2134 		struct gc_inode_list gc_list = {
2135 			.ilist = LIST_HEAD_INIT(gc_list.ilist),
2136 			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
2137 		};
2138 
2139 		/*
2140 		 * avoid migrating an empty section, as it can be allocated
2141 		 * by a log in parallel.
2142 		 */
2143 		if (!get_valid_blocks(sbi, segno, true))
2144 			continue;
2145 
2146 		if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno)))
2147 			continue;
2148 
2149 		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
2150 		put_gc_inode(&gc_list);
2151 
2152 		if (!dry_run && get_valid_blocks(sbi, segno, true))
2153 			return -EAGAIN;
2154 		if (dry_run && dry_run_sections &&
2155 		    !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
2156 			break;
2157 
2158 		if (fatal_signal_pending(current))
2159 			return -ERESTARTSYS;
2160 	}
2161 
2162 	return 0;
2163 }
2164 
2165 static int free_segment_range(struct f2fs_sb_info *sbi,
2166 				unsigned int secs, bool dry_run)
2167 {
2168 	unsigned int next_inuse, start, end;
2169 	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2170 	int gc_mode, gc_type;
2171 	int err = 0;
2172 	int type;
2173 
2174 	/* Force block allocation below the target range for GC */
2175 	MAIN_SECS(sbi) -= secs;
2176 	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
2177 	end = MAIN_SEGS(sbi) - 1;
2178 
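	/* drop stale victim hints that point into the range being removed */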
2179 	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2180 	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
2181 		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
2182 			SIT_I(sbi)->last_victim[gc_mode] = 0;
2183 
2184 	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
2185 		if (sbi->next_victim_seg[gc_type] >= start)
2186 			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
2187 	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2188 
2189 	/* Move out cursegs from the target range */
2190 	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
2191 		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
2192 		if (err)
2193 			goto out;
2194 	}
2195 
2196 	/* do GC to move out valid blocks in the range */
2197 	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
2198 	if (err || dry_run)
2199 		goto out;
2200 
2201 	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2202 	err = f2fs_write_checkpoint(sbi, &cpc);
2203 	if (err)
2204 		goto out;
2205 
2206 	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
2207 	if (next_inuse <= end) {
2208 		f2fs_err(sbi, "segno %u should be free but still inuse!",
2209 			 next_inuse);
2210 		f2fs_bug_on(sbi, 1);
2211 	}
2212 out:
2213 	MAIN_SECS(sbi) += secs;
2214 	return err;
2215 }
2216 
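/* apply a delta of @secs sections (negative to shrink) to the raw super block */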
2217 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
2218 {
2219 	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
2220 	int section_count;
2221 	int segment_count;
2222 	int segment_count_main;
2223 	long long block_count;
2224 	int segs = secs * SEGS_PER_SEC(sbi);
2225 
2226 	f2fs_down_write(&sbi->sb_lock);
2227 
2228 	section_count = le32_to_cpu(raw_sb->section_count);
2229 	segment_count = le32_to_cpu(raw_sb->segment_count);
2230 	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
2231 	block_count = le64_to_cpu(raw_sb->block_count);
2232 
2233 	raw_sb->section_count = cpu_to_le32(section_count + secs);
2234 	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
2235 	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
2236 	raw_sb->block_count = cpu_to_le64(block_count +
2237 			(long long)SEGS_TO_BLKS(sbi, segs));
2238 	if (f2fs_is_multi_device(sbi)) {
2239 		int last_dev = sbi->s_ndevs - 1;
2240 		int dev_segs =
2241 			le32_to_cpu(raw_sb->devs[last_dev].total_segments);
2242 
2243 		raw_sb->devs[last_dev].total_segments =
2244 						cpu_to_le32(dev_segs + segs);
2245 	}
2246 
2247 	f2fs_up_write(&sbi->sb_lock);
2248 }
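/* mirror the same section delta into the in-memory and checkpoint counters */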
2249 
2250 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2251 {
2252 	int segs = secs * SEGS_PER_SEC(sbi);
2253 	long long blks = SEGS_TO_BLKS(sbi, segs);
2254 	long long user_block_count =
2255 				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2256 
2257 	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2258 	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
2259 	MAIN_SECS(sbi) += secs;
2260 	if (sbi->allocate_section_hint > MAIN_SECS(sbi))
2261 		sbi->allocate_section_hint = MAIN_SECS(sbi);
2262 	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2263 	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
2264 	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2265 
2266 	if (f2fs_is_multi_device(sbi)) {
2267 		int last_dev = sbi->s_ndevs - 1;
2268 
2269 		sbi->allocate_section_hint = FDEV(0).total_segments /
2270 					SEGS_PER_SEC(sbi);
2271 
2272 		FDEV(last_dev).total_segments =
2273 				(int)FDEV(last_dev).total_segments + segs;
2274 		FDEV(last_dev).end_blk =
2275 				(long long)FDEV(last_dev).end_blk + blks;
2276 #ifdef CONFIG_BLK_DEV_ZONED
2277 		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
2278 					div_u64(blks, sbi->blocks_per_blkz);
2279 #endif
2280 	}
2281 }
2282 
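/*
 * Shrink the filesystem to @block_count blocks, as requested by the
 * F2FS_IOC_RESIZE_FS ioctl. Only shrinking is supported, and the new
 * size must be section-aligned.
 */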
2283 int f2fs_resize_fs(struct file *filp, __u64 block_count)
2284 {
2285 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2286 	__u64 old_block_count, shrunk_blocks;
2287 	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2288 	struct f2fs_lock_context lc;
2289 	struct f2fs_lock_context glc;
2290 	struct f2fs_lock_context clc;
2291 	unsigned int secs;
2292 	int err = 0;
2293 	__u32 rem;
2294 
2295 	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2296 	if (block_count > old_block_count)
2297 		return -EINVAL;
2298 
2299 	if (f2fs_is_multi_device(sbi)) {
2300 		int last_dev = sbi->s_ndevs - 1;
2301 		__u64 last_segs = FDEV(last_dev).total_segments;
2302 
2303 		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
2304 								old_block_count)
2305 			return -EINVAL;
2306 	}
2307 
2308 	/* the new fs size must be aligned to the section size */
2309 	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2310 	if (rem)
2311 		return -EINVAL;
2312 
2313 	if (block_count == old_block_count)
2314 		return 0;
2315 
2316 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2317 		f2fs_err(sbi, "Should run fsck to repair first.");
2318 		return -EFSCORRUPTED;
2319 	}
2320 
2321 	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2322 		f2fs_err(sbi, "Checkpoint should be enabled.");
2323 		return -EINVAL;
2324 	}
2325 
2326 	err = mnt_want_write_file(filp);
2327 	if (err)
2328 		return err;
2329 
2330 	shrunk_blocks = old_block_count - block_count;
2331 	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2332 
2333 	/* stop other GC */
2334 	if (!f2fs_down_write_trylock_trace(&sbi->gc_lock, &glc)) {
2335 		err = -EAGAIN;
2336 		goto out_drop_write;
2337 	}
2338 
2339 	/* stop CP to protect MAIN_SEC in free_segment_range */
2340 	f2fs_lock_op(sbi, &lc);
2341 
2342 	spin_lock(&sbi->stat_lock);
2343 	if (shrunk_blocks + valid_user_blocks(sbi) +
2344 		sbi->current_reserved_blocks + sbi->unusable_block_count +
2345 		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2346 		err = -ENOSPC;
2347 	spin_unlock(&sbi->stat_lock);
2348 
2349 	if (err)
2350 		goto out_unlock;
2351 
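	/* dry run first to verify that the tail sections can be vacated */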
2352 	err = free_segment_range(sbi, secs, true);
2353 
2354 out_unlock:
2355 	f2fs_unlock_op(sbi, &lc);
2356 	f2fs_up_write_trace(&sbi->gc_lock, &glc);
2357 out_drop_write:
2358 	mnt_drop_write_file(filp);
2359 	if (err)
2360 		return err;
2361 
2362 	err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
2363 	if (err)
2364 		return err;
2365 
2366 	if (f2fs_readonly(sbi->sb)) {
2367 		err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
2368 		if (err)
2369 			return err;
2370 		return -EROFS;
2371 	}
2372 
2373 	f2fs_down_write_trace(&sbi->gc_lock, &glc);
2374 	f2fs_down_write_trace(&sbi->cp_global_sem, &clc);
2375 
2376 	spin_lock(&sbi->stat_lock);
2377 	if (shrunk_blocks + valid_user_blocks(sbi) +
2378 		sbi->current_reserved_blocks + sbi->unusable_block_count +
2379 		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2380 		err = -ENOSPC;
2381 	else
2382 		sbi->user_block_count -= shrunk_blocks;
2383 	spin_unlock(&sbi->stat_lock);
2384 	if (err)
2385 		goto out_err;
2386 
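	/* the fs is frozen now; perform the real shrink */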
2387 	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2388 	err = free_segment_range(sbi, secs, false);
2389 	if (err)
2390 		goto recover_out;
2391 
2392 	update_sb_metadata(sbi, -secs);
2393 
2394 	err = f2fs_commit_super(sbi, false);
2395 	if (err) {
2396 		update_sb_metadata(sbi, secs);
2397 		goto recover_out;
2398 	}
2399 
2400 	update_fs_metadata(sbi, -secs);
2401 	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2402 	set_sbi_flag(sbi, SBI_IS_DIRTY);
2403 
2404 	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2405 	err = f2fs_write_checkpoint(sbi, &cpc);
2406 	if (err) {
2407 		update_fs_metadata(sbi, secs);
2408 		update_sb_metadata(sbi, secs);
2409 		f2fs_commit_super(sbi, false);
2410 	}
2411 recover_out:
2412 	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2413 	if (err) {
2414 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2415 		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2416 
2417 		spin_lock(&sbi->stat_lock);
2418 		sbi->user_block_count += shrunk_blocks;
2419 		spin_unlock(&sbi->stat_lock);
2420 	}
2421 out_err:
2422 	f2fs_up_write_trace(&sbi->cp_global_sem, &clc);
2423 	f2fs_up_write_trace(&sbi->gc_lock, &glc);
2424 	thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
2425 	return err;
2426 }
2427