xref: /linux/fs/f2fs/gc.c (revision f3a8b6645dc2e60d11f20c1c23afd964ff4e55ae)
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

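/*
 * Background GC worker: sleeps for an adaptive interval and, when the
 * filesystem is idle, selects a victim and migrates its valid blocks.
 * The sleep time shrinks while invalid blocks are plentiful and grows
 * when the device is busy or there is little to reclaim.
 */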
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false);
#endif

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note) We have to avoid triggering GC too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait a while to let
		 * more dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

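/*
 * Allocate the GC thread descriptor, initialize the sleep-time knobs to
 * their defaults, and spawn the per-filesystem "f2fs_gc-major:minor"
 * kernel thread.
 */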
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

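/*
 * Pick the victim selection mode: background GC defaults to cost-benefit
 * (GC_CB) and foreground GC to greedy (GC_GREEDY). The gc_idle tunable
 * overrides this: 1 forces cost-benefit, 2 forces greedy.
 */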
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

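/*
 * Fill in the victim selection policy. SSR scans a single segment of the
 * requested type with the greedy mode, while LFS cleaning scans whole
 * sections of any dirty type. The scan resumes from the last victim so
 * that repeated calls sweep the whole dirty bitmap.
 */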
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC earlier. Those sections are guaranteed
	 * to contain only a few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

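/*
 * Cost-benefit cost of a section: compute the section's utilization u
 * (percentage of valid blocks) and its age (section mtime normalized to
 * 0..100 between the SIT's min and max mtime), then invert the classic
 * LFS-style benefit term (100 - u) * age / (100 + u) against UINT_MAX,
 * since the victim search keeps the segment with the *minimum* cost.
 */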
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	last_victim = sbi->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
				sbi->last_victim[p.gc_mode] = last_victim + 1;
			else
				sbi->last_victim[p.gc_mode] = segno + 1;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

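/*
 * gc_list caches inode references for the duration of one GC pass: a
 * radix tree keyed by ino for fast duplicate lookup, plus a list used
 * to drop every reference once the pass finishes.
 */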
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

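/*
 * Return whether the block at @offset within segment @segno is still
 * valid according to the current SIT validity bitmap. Taken under
 * sentry_lock because GC races with block invalidation.
 */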
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is still valid and is
 * migrated with cold status; otherwise the stale node block is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index that corresponds to the given node
 * offset. Be careful: the caller must pass a node offset that refers to
 * a direct node block. Passing an offset that points at any other node
 * type, such as an indirect or double indirect node block, is a caller
 * bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

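/*
 * A data block is "alive" only if the node block recorded in its summary
 * entry still exists, carries the same version, and still points back at
 * this block address. Anything else means the block was overwritten or
 * truncated after the summary was written.
 */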
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

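/*
 * Migrate an encrypted data block without decrypting it: allocate the
 * new block address first, read the ciphertext into a page of the meta
 * inode's mapping keyed by the new address, and write it back out there.
 * If anything fails after allocation, the old block address is restored.
 */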
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * Don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
							&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = WRITE_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_mbio(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

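/*
 * Migrate one ordinary data page. Background GC merely dirties the page
 * and marks it cold so the regular writeback path relocates it, while
 * foreground GC writes it out synchronously, retrying on transient
 * -ENOMEM.
 */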
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.op = REQ_OP_WRITE,
			.op_flags = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of the victim data block,
 * and checks the data block's validity. If the block is valid, it is
 * copied with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, defer its data moves to phase 4 */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

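/*
 * Wrapper around the victim selection ops: pick an LFS cleaning victim
 * of any type while holding sentry_lock.
 */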
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

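/*
 * Collect one section's worth of segments: read ahead and reference the
 * SSA blocks, then migrate each victim segment's valid node or data
 * blocks. Returns 1 if a foreground GC pass left the whole section with
 * no valid blocks, i.e. one section was freed.
 */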
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int sec_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks, which have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, 1) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */

		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC &&
		get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
		sec_freed = 1;

	stat_inc_call_count(sbi->stat_info);

	return sec_freed;
}

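/*
 * Entry point for garbage collection. The caller must hold gc_mutex,
 * which is released here before returning. Background GC escalates to
 * foreground GC when free sections run short, and a non-sync call keeps
 * looping until enough sections have been freed.
 */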
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
	unsigned int segno;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	segno = NULL_SEGNO;

	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
		gc_type = FG_GC;
		/*
		 * If there is no victim and no prefree segment but there are
		 * still not enough free sections, we should flush dent/node
		 * blocks and do garbage collection.
		 */
		if (__get_victim(sbi, &segno, gc_type) ||
						prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			segno = NULL_SEGNO;
		} else if (has_not_enough_free_secs(sbi, 0, 0)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
			gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0))
			goto gc_more;

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}
977