xref: /linux/fs/f2fs/segment.c (revision fd639726bf15fca8ee1a00dce8e0096d0ad9bd18)
1 /*
2  * fs/f2fs/segment.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/bio.h>
14 #include <linux/blkdev.h>
15 #include <linux/prefetch.h>
16 #include <linux/kthread.h>
17 #include <linux/swap.h>
18 #include <linux/timer.h>
19 #include <linux/freezer.h>
20 #include <linux/sched/signal.h>
21 
22 #include "f2fs.h"
23 #include "segment.h"
24 #include "node.h"
25 #include "gc.h"
26 #include "trace.h"
27 #include <trace/events/f2fs.h>
28 
29 #define __reverse_ffz(x) __reverse_ffs(~(x))
30 
31 static struct kmem_cache *discard_entry_slab;
32 static struct kmem_cache *discard_cmd_slab;
33 static struct kmem_cache *sit_entry_set_slab;
34 static struct kmem_cache *inmem_entry_slab;
35 
36 static unsigned long __reverse_ulong(unsigned char *str)
37 {
38 	unsigned long tmp = 0;
39 	int shift = 24, idx = 0;
40 
41 #if BITS_PER_LONG == 64
42 	shift = 56;
43 #endif
44 	while (shift >= 0) {
45 		tmp |= (unsigned long)str[idx++] << shift;
46 		shift -= BITS_PER_BYTE;
47 	}
48 	return tmp;
49 }
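
/*
 * Illustration (editorial note): __reverse_ulong() packs str[0] into the
 * most significant byte, so on a 64-bit build str = {0x01, 0x02, ..., 0x08}
 * yields 0x0102030405060708UL regardless of host endianness.
 */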
50 
51 /*
52  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
53  * MSB and LSB are reversed in a byte by f2fs_set_bit.
54  */
55 static inline unsigned long __reverse_ffs(unsigned long word)
56 {
57 	int num = 0;
58 
59 #if BITS_PER_LONG == 64
60 	if ((word & 0xffffffff00000000UL) == 0)
61 		num += 32;
62 	else
63 		word >>= 32;
64 #endif
65 	if ((word & 0xffff0000) == 0)
66 		num += 16;
67 	else
68 		word >>= 16;
69 
70 	if ((word & 0xff00) == 0)
71 		num += 8;
72 	else
73 		word >>= 8;
74 
75 	if ((word & 0xf0) == 0)
76 		num += 4;
77 	else
78 		word >>= 4;
79 
80 	if ((word & 0xc) == 0)
81 		num += 2;
82 	else
83 		word >>= 2;
84 
85 	if ((word & 0x2) == 0)
86 		num += 1;
87 	return num;
88 }
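
/*
 * Illustration (editorial note): bit positions are counted from the MSB
 * downwards, e.g. __reverse_ffs(1UL << (BITS_PER_LONG - 1)) == 0 and, on
 * 64-bit, __reverse_ffs(0x2UL) == 62, matching f2fs's reversed bit order.
 */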
89 
90 /*
91  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
92  * f2fs_set_bit makes MSB and LSB reversed in a byte.
93  * @size must be an integral multiple of BITS_PER_LONG.
94  * Example:
95  *                             MSB <--> LSB
96  *   f2fs_set_bit(0, bitmap) => 1000 0000
97  *   f2fs_set_bit(7, bitmap) => 0000 0001
98  */
99 static unsigned long __find_rev_next_bit(const unsigned long *addr,
100 			unsigned long size, unsigned long offset)
101 {
102 	const unsigned long *p = addr + BIT_WORD(offset);
103 	unsigned long result = size;
104 	unsigned long tmp;
105 
106 	if (offset >= size)
107 		return size;
108 
109 	size -= (offset & ~(BITS_PER_LONG - 1));
110 	offset %= BITS_PER_LONG;
111 
112 	while (1) {
113 		if (*p == 0)
114 			goto pass;
115 
116 		tmp = __reverse_ulong((unsigned char *)p);
117 
118 		tmp &= ~0UL >> offset;
119 		if (size < BITS_PER_LONG)
120 			tmp &= (~0UL << (BITS_PER_LONG - size));
121 		if (tmp)
122 			goto found;
123 pass:
124 		if (size <= BITS_PER_LONG)
125 			break;
126 		size -= BITS_PER_LONG;
127 		offset = 0;
128 		p++;
129 	}
130 	return result;
131 found:
132 	return result - size + __reverse_ffs(tmp);
133 }
134 
135 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
136 			unsigned long size, unsigned long offset)
137 {
138 	const unsigned long *p = addr + BIT_WORD(offset);
139 	unsigned long result = size;
140 	unsigned long tmp;
141 
142 	if (offset >= size)
143 		return size;
144 
145 	size -= (offset & ~(BITS_PER_LONG - 1));
146 	offset %= BITS_PER_LONG;
147 
148 	while (1) {
149 		if (*p == ~0UL)
150 			goto pass;
151 
152 		tmp = __reverse_ulong((unsigned char *)p);
153 
154 		if (offset)
155 			tmp |= ~0UL << (BITS_PER_LONG - offset);
156 		if (size < BITS_PER_LONG)
157 			tmp |= ~0UL >> size;
158 		if (tmp != ~0UL)
159 			goto found;
160 pass:
161 		if (size <= BITS_PER_LONG)
162 			break;
163 		size -= BITS_PER_LONG;
164 		offset = 0;
165 		p++;
166 	}
167 	return result;
168 found:
169 	return result - size + __reverse_ffz(tmp);
170 }
171 
172 bool need_SSR(struct f2fs_sb_info *sbi)
173 {
174 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
175 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
176 	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
177 
178 	if (test_opt(sbi, LFS))
179 		return false;
180 	if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
181 		return true;
182 
183 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
184 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
185 }
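
/*
 * Reading the check above: SSR is needed once the free sections can no
 * longer cover the sections pinned by dirty nodes, dentries (weighted
 * twice) and inode metadata, plus the reserved and min_ssr headroom.
 */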
186 
187 void register_inmem_page(struct inode *inode, struct page *page)
188 {
189 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
190 	struct f2fs_inode_info *fi = F2FS_I(inode);
191 	struct inmem_pages *new;
192 
193 	f2fs_trace_pid(page);
194 
195 	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
196 	SetPagePrivate(page);
197 
198 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
199 
200 	/* add the atomic page into the list */
201 	new->page = page;
202 	INIT_LIST_HEAD(&new->list);
203 
204 	/* increase reference count with clean state */
205 	mutex_lock(&fi->inmem_lock);
206 	get_page(page);
207 	list_add_tail(&new->list, &fi->inmem_pages);
208 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
209 	if (list_empty(&fi->inmem_ilist))
210 		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
211 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
212 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
213 	mutex_unlock(&fi->inmem_lock);
214 
215 	trace_f2fs_register_inmem_page(page, INMEM);
216 }
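
/*
 * Atomic-write flow in brief: register_inmem_page() pins each dirty page
 * on fi->inmem_pages, commit_inmem_pages() later writes them out under
 * f2fs_lock_op(), and __revoke_inmem_pages() drops or rolls them back.
 */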
217 
218 static int __revoke_inmem_pages(struct inode *inode,
219 				struct list_head *head, bool drop, bool recover)
220 {
221 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
222 	struct inmem_pages *cur, *tmp;
223 	int err = 0;
224 
225 	list_for_each_entry_safe(cur, tmp, head, list) {
226 		struct page *page = cur->page;
227 
228 		if (drop)
229 			trace_f2fs_commit_inmem_page(page, INMEM_DROP);
230 
231 		lock_page(page);
232 
233 		if (recover) {
234 			struct dnode_of_data dn;
235 			struct node_info ni;
236 
237 			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
238 retry:
239 			set_new_dnode(&dn, inode, NULL, NULL, 0);
240 			err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
241 			if (err) {
242 				if (err == -ENOMEM) {
243 					congestion_wait(BLK_RW_ASYNC, HZ/50);
244 					cond_resched();
245 					goto retry;
246 				}
247 				err = -EAGAIN;
248 				goto next;
249 			}
250 			get_node_info(sbi, dn.nid, &ni);
251 			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
252 					cur->old_addr, ni.version, true, true);
253 			f2fs_put_dnode(&dn);
254 		}
255 next:
256 		/* we don't need to invalidate this on the successful path */
257 		if (drop || recover)
258 			ClearPageUptodate(page);
259 		set_page_private(page, 0);
260 		ClearPagePrivate(page);
261 		f2fs_put_page(page, 1);
262 
263 		list_del(&cur->list);
264 		kmem_cache_free(inmem_entry_slab, cur);
265 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
266 	}
267 	return err;
268 }
269 
270 void drop_inmem_pages_all(struct f2fs_sb_info *sbi)
271 {
272 	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
273 	struct inode *inode;
274 	struct f2fs_inode_info *fi;
275 next:
276 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
277 	if (list_empty(head)) {
278 		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
279 		return;
280 	}
281 	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
282 	inode = igrab(&fi->vfs_inode);
283 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
284 
285 	if (inode) {
286 		drop_inmem_pages(inode);
287 		iput(inode);
288 	}
289 	congestion_wait(BLK_RW_ASYNC, HZ/50);
290 	cond_resched();
291 	goto next;
292 }
293 
294 void drop_inmem_pages(struct inode *inode)
295 {
296 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
297 	struct f2fs_inode_info *fi = F2FS_I(inode);
298 
299 	mutex_lock(&fi->inmem_lock);
300 	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
301 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
302 	if (!list_empty(&fi->inmem_ilist))
303 		list_del_init(&fi->inmem_ilist);
304 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
305 	mutex_unlock(&fi->inmem_lock);
306 
307 	clear_inode_flag(inode, FI_ATOMIC_FILE);
308 	clear_inode_flag(inode, FI_HOT_DATA);
309 	stat_dec_atomic_write(inode);
310 }
311 
312 void drop_inmem_page(struct inode *inode, struct page *page)
313 {
314 	struct f2fs_inode_info *fi = F2FS_I(inode);
315 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
316 	struct list_head *head = &fi->inmem_pages;
317 	struct inmem_pages *cur = NULL;
318 
319 	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
320 
321 	mutex_lock(&fi->inmem_lock);
322 	list_for_each_entry(cur, head, list) {
323 		if (cur->page == page)
324 			break;
325 	}
326 
327 	f2fs_bug_on(sbi, !cur || cur->page != page);
328 	list_del(&cur->list);
329 	mutex_unlock(&fi->inmem_lock);
330 
331 	dec_page_count(sbi, F2FS_INMEM_PAGES);
332 	kmem_cache_free(inmem_entry_slab, cur);
333 
334 	ClearPageUptodate(page);
335 	set_page_private(page, 0);
336 	ClearPagePrivate(page);
337 	f2fs_put_page(page, 0);
338 
339 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
340 }
341 
342 static int __commit_inmem_pages(struct inode *inode,
343 					struct list_head *revoke_list)
344 {
345 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
346 	struct f2fs_inode_info *fi = F2FS_I(inode);
347 	struct inmem_pages *cur, *tmp;
348 	struct f2fs_io_info fio = {
349 		.sbi = sbi,
350 		.ino = inode->i_ino,
351 		.type = DATA,
352 		.op = REQ_OP_WRITE,
353 		.op_flags = REQ_SYNC | REQ_PRIO,
354 		.io_type = FS_DATA_IO,
355 	};
356 	pgoff_t last_idx = ULONG_MAX;
357 	int err = 0;
358 
359 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
360 		struct page *page = cur->page;
361 
362 		lock_page(page);
363 		if (page->mapping == inode->i_mapping) {
364 			trace_f2fs_commit_inmem_page(page, INMEM);
365 
366 			set_page_dirty(page);
367 			f2fs_wait_on_page_writeback(page, DATA, true);
368 			if (clear_page_dirty_for_io(page)) {
369 				inode_dec_dirty_pages(inode);
370 				remove_dirty_inode(inode);
371 			}
372 retry:
373 			fio.page = page;
374 			fio.old_blkaddr = NULL_ADDR;
375 			fio.encrypted_page = NULL;
376 			fio.need_lock = LOCK_DONE;
377 			err = do_write_data_page(&fio);
378 			if (err) {
379 				if (err == -ENOMEM) {
380 					congestion_wait(BLK_RW_ASYNC, HZ/50);
381 					cond_resched();
382 					goto retry;
383 				}
384 				unlock_page(page);
385 				break;
386 			}
387 			/* record old blkaddr for revoking */
388 			cur->old_addr = fio.old_blkaddr;
389 			last_idx = page->index;
390 		}
391 		unlock_page(page);
392 		list_move_tail(&cur->list, revoke_list);
393 	}
394 
395 	if (last_idx != ULONG_MAX)
396 		f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);
397 
398 	if (!err)
399 		__revoke_inmem_pages(inode, revoke_list, false, false);
400 
401 	return err;
402 }
403 
404 int commit_inmem_pages(struct inode *inode)
405 {
406 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
407 	struct f2fs_inode_info *fi = F2FS_I(inode);
408 	struct list_head revoke_list;
409 	int err;
410 
411 	INIT_LIST_HEAD(&revoke_list);
412 	f2fs_balance_fs(sbi, true);
413 	f2fs_lock_op(sbi);
414 
415 	set_inode_flag(inode, FI_ATOMIC_COMMIT);
416 
417 	mutex_lock(&fi->inmem_lock);
418 	err = __commit_inmem_pages(inode, &revoke_list);
419 	if (err) {
420 		int ret;
421 		/*
422 		 * Try to revoke all committed pages.  This can still fail,
423 		 * e.g. due to lack of memory; in that case -EAGAIN is
424 		 * returned, meaning the transaction is no longer intact and
425 		 * the caller should use its journal to recover, or rewrite
426 		 * and commit the last transaction.  For any other error
427 		 * number, the revoke was done by the filesystem itself.
428 		 */
429 		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
430 		if (ret)
431 			err = ret;
432 
433 		/* drop all uncommitted pages */
434 		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
435 	}
436 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
437 	if (!list_empty(&fi->inmem_ilist))
438 		list_del_init(&fi->inmem_ilist);
439 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
440 	mutex_unlock(&fi->inmem_lock);
441 
442 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
443 
444 	f2fs_unlock_op(sbi);
445 	return err;
446 }
447 
448 /*
449  * This function balances dirty node and dentry pages.
450  * In addition, it controls garbage collection.
451  */
452 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
453 {
454 #ifdef CONFIG_F2FS_FAULT_INJECTION
455 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
456 		f2fs_show_injection_info(FAULT_CHECKPOINT);
457 		f2fs_stop_checkpoint(sbi, false);
458 	}
459 #endif
460 
461 	/* balance_fs_bg() work may still be pending */
462 	if (need && excess_cached_nats(sbi))
463 		f2fs_balance_fs_bg(sbi);
464 
465 	/*
466 	 * If there are too many dirty dir/node pages without enough free
467 	 * segments, we should do GC or end up with a checkpoint.
468 	 */
469 	if (has_not_enough_free_secs(sbi, 0, 0)) {
470 		mutex_lock(&sbi->gc_mutex);
471 		f2fs_gc(sbi, false, false, NULL_SEGNO);
472 	}
473 }
474 
475 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
476 {
477 	/* try to shrink the extent cache when there is not enough memory */
478 	if (!available_free_memory(sbi, EXTENT_CACHE))
479 		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
480 
481 	/* check the # of cached NAT entries */
482 	if (!available_free_memory(sbi, NAT_ENTRIES))
483 		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
484 
485 	if (!available_free_memory(sbi, FREE_NIDS))
486 		try_to_free_nids(sbi, MAX_FREE_NIDS);
487 	else
488 		build_free_nids(sbi, false, false);
489 
490 	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
491 		return;
492 
493 	/* checkpoint is the only way to shrink partial cached entries */
494 	if (!available_free_memory(sbi, NAT_ENTRIES) ||
495 			!available_free_memory(sbi, INO_ENTRIES) ||
496 			excess_prefree_segs(sbi) ||
497 			excess_dirty_nats(sbi) ||
498 			f2fs_time_over(sbi, CP_TIME)) {
499 		if (test_opt(sbi, DATA_FLUSH)) {
500 			struct blk_plug plug;
501 
502 			blk_start_plug(&plug);
503 			sync_dirty_inodes(sbi, FILE_INODE);
504 			blk_finish_plug(&plug);
505 		}
506 		f2fs_sync_fs(sbi->sb, true);
507 		stat_inc_bg_cp_count(sbi->stat_info);
508 	}
509 }
510 
511 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
512 				struct block_device *bdev)
513 {
514 	struct bio *bio = f2fs_bio_alloc(sbi, 0, true);
515 	int ret;
516 
517 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
518 	bio_set_dev(bio, bdev);
519 	ret = submit_bio_wait(bio);
520 	bio_put(bio);
521 
522 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
523 				test_opt(sbi, FLUSH_MERGE), ret);
524 	return ret;
525 }
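
/*
 * Note: a zero-length bio with REQ_PREFLUSH is a pure cache-flush request;
 * submit_bio_wait() returns only after the device has flushed (or failed
 * to flush) its volatile write cache.
 */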
526 
527 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
528 {
529 	int ret = 0;
530 	int i;
531 
532 	if (!sbi->s_ndevs)
533 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
534 
535 	for (i = 0; i < sbi->s_ndevs; i++) {
536 		if (!is_dirty_device(sbi, ino, i, FLUSH_INO))
537 			continue;
538 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
539 		if (ret)
540 			break;
541 	}
542 	return ret;
543 }
544 
545 static int issue_flush_thread(void *data)
546 {
547 	struct f2fs_sb_info *sbi = data;
548 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
549 	wait_queue_head_t *q = &fcc->flush_wait_queue;
550 repeat:
551 	if (kthread_should_stop())
552 		return 0;
553 
554 	sb_start_intwrite(sbi->sb);
555 
556 	if (!llist_empty(&fcc->issue_list)) {
557 		struct flush_cmd *cmd, *next;
558 		int ret;
559 
560 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
561 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
562 
563 		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
564 
565 		ret = submit_flush_wait(sbi, cmd->ino);
566 		atomic_inc(&fcc->issued_flush);
567 
568 		llist_for_each_entry_safe(cmd, next,
569 					  fcc->dispatch_list, llnode) {
570 			cmd->ret = ret;
571 			complete(&cmd->wait);
572 		}
573 		fcc->dispatch_list = NULL;
574 	}
575 
576 	sb_end_intwrite(sbi->sb);
577 
578 	wait_event_interruptible(*q,
579 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
580 	goto repeat;
581 }
582 
583 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
584 {
585 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
586 	struct flush_cmd cmd;
587 	int ret;
588 
589 	if (test_opt(sbi, NOBARRIER))
590 		return 0;
591 
592 	if (!test_opt(sbi, FLUSH_MERGE)) {
593 		ret = submit_flush_wait(sbi, ino);
594 		atomic_inc(&fcc->issued_flush);
595 		return ret;
596 	}
597 
598 	if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) {
599 		ret = submit_flush_wait(sbi, ino);
600 		atomic_dec(&fcc->issing_flush);
601 
602 		atomic_inc(&fcc->issued_flush);
603 		return ret;
604 	}
605 
606 	cmd.ino = ino;
607 	init_completion(&cmd.wait);
608 
609 	llist_add(&cmd.llnode, &fcc->issue_list);
610 
611 	/* update issue_list before we wake up issue_flush thread */
612 	smp_mb();
613 
614 	if (waitqueue_active(&fcc->flush_wait_queue))
615 		wake_up(&fcc->flush_wait_queue);
616 
617 	if (fcc->f2fs_issue_flush) {
618 		wait_for_completion(&cmd.wait);
619 		atomic_dec(&fcc->issing_flush);
620 	} else {
621 		struct llist_node *list;
622 
623 		list = llist_del_all(&fcc->issue_list);
624 		if (!list) {
625 			wait_for_completion(&cmd.wait);
626 			atomic_dec(&fcc->issing_flush);
627 		} else {
628 			struct flush_cmd *tmp, *next;
629 
630 			ret = submit_flush_wait(sbi, ino);
631 
632 			llist_for_each_entry_safe(tmp, next, list, llnode) {
633 				if (tmp == &cmd) {
634 					cmd.ret = ret;
635 					atomic_dec(&fcc->issing_flush);
636 					continue;
637 				}
638 				tmp->ret = ret;
639 				complete(&tmp->wait);
640 			}
641 		}
642 	}
643 
644 	return cmd.ret;
645 }
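
/*
 * Summary of the merge logic above: the sole in-flight caller (or any
 * caller on a multi-device setup) issues the flush itself; concurrent
 * callers queue on issue_list and share the result of a single flush,
 * trading a little latency for far fewer cache-flush commands.
 */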
646 
647 int create_flush_cmd_control(struct f2fs_sb_info *sbi)
648 {
649 	dev_t dev = sbi->sb->s_bdev->bd_dev;
650 	struct flush_cmd_control *fcc;
651 	int err = 0;
652 
653 	if (SM_I(sbi)->fcc_info) {
654 		fcc = SM_I(sbi)->fcc_info;
655 		if (fcc->f2fs_issue_flush)
656 			return err;
657 		goto init_thread;
658 	}
659 
660 	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
661 	if (!fcc)
662 		return -ENOMEM;
663 	atomic_set(&fcc->issued_flush, 0);
664 	atomic_set(&fcc->issing_flush, 0);
665 	init_waitqueue_head(&fcc->flush_wait_queue);
666 	init_llist_head(&fcc->issue_list);
667 	SM_I(sbi)->fcc_info = fcc;
668 	if (!test_opt(sbi, FLUSH_MERGE))
669 		return err;
670 
671 init_thread:
672 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
673 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
674 	if (IS_ERR(fcc->f2fs_issue_flush)) {
675 		err = PTR_ERR(fcc->f2fs_issue_flush);
676 		kfree(fcc);
677 		SM_I(sbi)->fcc_info = NULL;
678 		return err;
679 	}
680 
681 	return err;
682 }
683 
684 void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
685 {
686 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
687 
688 	if (fcc && fcc->f2fs_issue_flush) {
689 		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
690 
691 		fcc->f2fs_issue_flush = NULL;
692 		kthread_stop(flush_thread);
693 	}
694 	if (free) {
695 		kfree(fcc);
696 		SM_I(sbi)->fcc_info = NULL;
697 	}
698 }
699 
700 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
701 {
702 	int ret = 0, i;
703 
704 	if (!sbi->s_ndevs)
705 		return 0;
706 
707 	for (i = 1; i < sbi->s_ndevs; i++) {
708 		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
709 			continue;
710 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
711 		if (ret)
712 			break;
713 
714 		spin_lock(&sbi->dev_lock);
715 		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
716 		spin_unlock(&sbi->dev_lock);
717 	}
718 
719 	return ret;
720 }
721 
722 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
723 		enum dirty_type dirty_type)
724 {
725 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
726 
727 	/* need not be added */
728 	if (IS_CURSEG(sbi, segno))
729 		return;
730 
731 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
732 		dirty_i->nr_dirty[dirty_type]++;
733 
734 	if (dirty_type == DIRTY) {
735 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
736 		enum dirty_type t = sentry->type;
737 
738 		if (unlikely(t >= DIRTY)) {
739 			f2fs_bug_on(sbi, 1);
740 			return;
741 		}
742 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
743 			dirty_i->nr_dirty[t]++;
744 	}
745 }
746 
747 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
748 		enum dirty_type dirty_type)
749 {
750 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
751 
752 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
753 		dirty_i->nr_dirty[dirty_type]--;
754 
755 	if (dirty_type == DIRTY) {
756 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
757 		enum dirty_type t = sentry->type;
758 
759 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
760 			dirty_i->nr_dirty[t]--;
761 
762 		if (get_valid_blocks(sbi, segno, true) == 0)
763 			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
764 						dirty_i->victim_secmap);
765 	}
766 }
767 
768 /*
769  * Errors such as -ENOMEM should not occur here.
770  * Adding a dirty entry into the seglist is not a critical operation.
771  * If a given segment is one of the current working segments, it won't be added.
772  */
773 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
774 {
775 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
776 	unsigned short valid_blocks;
777 
778 	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
779 		return;
780 
781 	mutex_lock(&dirty_i->seglist_lock);
782 
783 	valid_blocks = get_valid_blocks(sbi, segno, false);
784 
785 	if (valid_blocks == 0) {
786 		__locate_dirty_segment(sbi, segno, PRE);
787 		__remove_dirty_segment(sbi, segno, DIRTY);
788 	} else if (valid_blocks < sbi->blocks_per_seg) {
789 		__locate_dirty_segment(sbi, segno, DIRTY);
790 	} else {
791 		/* Recovery routine with SSR needs this */
792 		__remove_dirty_segment(sbi, segno, DIRTY);
793 	}
794 
795 	mutex_unlock(&dirty_i->seglist_lock);
796 }
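
/*
 * Example: a segment whose valid block count drops to zero moves to the
 * PRE list (freeable after the next checkpoint); a partially valid one
 * goes on DIRTY; a fully valid one is removed, which the SSR recovery
 * path relies on.
 */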
797 
798 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
799 		struct block_device *bdev, block_t lstart,
800 		block_t start, block_t len)
801 {
802 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
803 	struct list_head *pend_list;
804 	struct discard_cmd *dc;
805 
806 	f2fs_bug_on(sbi, !len);
807 
808 	pend_list = &dcc->pend_list[plist_idx(len)];
809 
810 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
811 	INIT_LIST_HEAD(&dc->list);
812 	dc->bdev = bdev;
813 	dc->lstart = lstart;
814 	dc->start = start;
815 	dc->len = len;
816 	dc->ref = 0;
817 	dc->state = D_PREP;
818 	dc->error = 0;
819 	init_completion(&dc->wait);
820 	list_add_tail(&dc->list, pend_list);
821 	atomic_inc(&dcc->discard_cmd_cnt);
822 	dcc->undiscard_blks += len;
823 
824 	return dc;
825 }
826 
827 static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
828 				struct block_device *bdev, block_t lstart,
829 				block_t start, block_t len,
830 				struct rb_node *parent, struct rb_node **p)
831 {
832 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
833 	struct discard_cmd *dc;
834 
835 	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
836 
837 	rb_link_node(&dc->rb_node, parent, p);
838 	rb_insert_color(&dc->rb_node, &dcc->root);
839 
840 	return dc;
841 }
842 
843 static void __detach_discard_cmd(struct discard_cmd_control *dcc,
844 							struct discard_cmd *dc)
845 {
846 	if (dc->state == D_DONE)
847 		atomic_dec(&dcc->issing_discard);
848 
849 	list_del(&dc->list);
850 	rb_erase(&dc->rb_node, &dcc->root);
851 	dcc->undiscard_blks -= dc->len;
852 
853 	kmem_cache_free(discard_cmd_slab, dc);
854 
855 	atomic_dec(&dcc->discard_cmd_cnt);
856 }
857 
858 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
859 							struct discard_cmd *dc)
860 {
861 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
862 
863 	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
864 
865 	f2fs_bug_on(sbi, dc->ref);
866 
867 	if (dc->error == -EOPNOTSUPP)
868 		dc->error = 0;
869 
870 	if (dc->error)
871 		f2fs_msg(sbi->sb, KERN_INFO,
872 			"Issue discard(%u, %u, %u) failed, ret: %d",
873 			dc->lstart, dc->start, dc->len, dc->error);
874 	__detach_discard_cmd(dcc, dc);
875 }
876 
877 static void f2fs_submit_discard_endio(struct bio *bio)
878 {
879 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
880 
881 	dc->error = blk_status_to_errno(bio->bi_status);
882 	dc->state = D_DONE;
883 	complete_all(&dc->wait);
884 	bio_put(bio);
885 }
886 
887 void __check_sit_bitmap(struct f2fs_sb_info *sbi,
888 				block_t start, block_t end)
889 {
890 #ifdef CONFIG_F2FS_CHECK_FS
891 	struct seg_entry *sentry;
892 	unsigned int segno;
893 	block_t blk = start;
894 	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
895 	unsigned long *map;
896 
897 	while (blk < end) {
898 		segno = GET_SEGNO(sbi, blk);
899 		sentry = get_seg_entry(sbi, segno);
900 		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
901 
902 		if (end < START_BLOCK(sbi, segno + 1))
903 			size = GET_BLKOFF_FROM_SEG0(sbi, end);
904 		else
905 			size = max_blocks;
906 		map = (unsigned long *)(sentry->cur_valid_map);
907 		offset = __find_rev_next_bit(map, size, offset);
908 		f2fs_bug_on(sbi, offset != size);
909 		blk = START_BLOCK(sbi, segno + 1);
910 	}
911 #endif
912 }
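
/*
 * Debug-only invariant (CONFIG_F2FS_CHECK_FS): every block in the range
 * being discarded must already be invalid in the SIT bitmap, so the
 * search above is expected to find no set bit (offset == size).
 */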
913 
914 /* this function is adapted from blkdev_issue_discard() in block/blk-lib.c */
915 static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
916 						struct discard_policy *dpolicy,
917 						struct discard_cmd *dc)
918 {
919 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
920 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
921 					&(dcc->fstrim_list) : &(dcc->wait_list);
922 	struct bio *bio = NULL;
923 	int flag = dpolicy->sync ? REQ_SYNC : 0;
924 
925 	if (dc->state != D_PREP)
926 		return;
927 
928 	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
929 
930 	dc->error = __blkdev_issue_discard(dc->bdev,
931 				SECTOR_FROM_BLOCK(dc->start),
932 				SECTOR_FROM_BLOCK(dc->len),
933 				GFP_NOFS, 0, &bio);
934 	if (!dc->error) {
935 		/* must be set before submission so D_DONE isn't seen right away */
936 		dc->state = D_SUBMIT;
937 		atomic_inc(&dcc->issued_discard);
938 		atomic_inc(&dcc->issing_discard);
939 		if (bio) {
940 			bio->bi_private = dc;
941 			bio->bi_end_io = f2fs_submit_discard_endio;
942 			bio->bi_opf |= flag;
943 			submit_bio(bio);
944 			list_move_tail(&dc->list, wait_list);
945 			__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
946 
947 			f2fs_update_iostat(sbi, FS_DISCARD, 1);
948 		}
949 	} else {
950 		__remove_discard_cmd(sbi, dc);
951 	}
952 }
953 
954 static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
955 				struct block_device *bdev, block_t lstart,
956 				block_t start, block_t len,
957 				struct rb_node **insert_p,
958 				struct rb_node *insert_parent)
959 {
960 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
961 	struct rb_node **p;
962 	struct rb_node *parent = NULL;
963 	struct discard_cmd *dc = NULL;
964 
965 	if (insert_p && insert_parent) {
966 		parent = insert_parent;
967 		p = insert_p;
968 		goto do_insert;
969 	}
970 
971 	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
972 do_insert:
973 	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
974 	if (!dc)
975 		return NULL;
976 
977 	return dc;
978 }
979 
980 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
981 						struct discard_cmd *dc)
982 {
983 	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
984 }
985 
986 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
987 				struct discard_cmd *dc, block_t blkaddr)
988 {
989 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
990 	struct discard_info di = dc->di;
991 	bool modified = false;
992 
993 	if (dc->state == D_DONE || dc->len == 1) {
994 		__remove_discard_cmd(sbi, dc);
995 		return;
996 	}
997 
998 	dcc->undiscard_blks -= di.len;
999 
1000 	if (blkaddr > di.lstart) {
1001 		dc->len = blkaddr - dc->lstart;
1002 		dcc->undiscard_blks += dc->len;
1003 		__relocate_discard_cmd(dcc, dc);
1004 		modified = true;
1005 	}
1006 
1007 	if (blkaddr < di.lstart + di.len - 1) {
1008 		if (modified) {
1009 			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
1010 					di.start + blkaddr + 1 - di.lstart,
1011 					di.lstart + di.len - 1 - blkaddr,
1012 					NULL, NULL);
1013 		} else {
1014 			dc->lstart++;
1015 			dc->len--;
1016 			dc->start++;
1017 			dcc->undiscard_blks += dc->len;
1018 			__relocate_discard_cmd(dcc, dc);
1019 		}
1020 	}
1021 }
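
/*
 * Example: punching one block out of a pending command covering
 * [lstart, lstart + len) leaves up to two commands, [lstart, blkaddr)
 * and [blkaddr + 1, lstart + len), each re-bucketed by its new length.
 */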
1022 
1023 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1024 				struct block_device *bdev, block_t lstart,
1025 				block_t start, block_t len)
1026 {
1027 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1028 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1029 	struct discard_cmd *dc;
1030 	struct discard_info di = {0};
1031 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1032 	block_t end = lstart + len;
1033 
1034 	mutex_lock(&dcc->cmd_lock);
1035 
1036 	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
1037 					NULL, lstart,
1038 					(struct rb_entry **)&prev_dc,
1039 					(struct rb_entry **)&next_dc,
1040 					&insert_p, &insert_parent, true);
1041 	if (dc)
1042 		prev_dc = dc;
1043 
1044 	if (!prev_dc) {
1045 		di.lstart = lstart;
1046 		di.len = next_dc ? next_dc->lstart - lstart : len;
1047 		di.len = min(di.len, len);
1048 		di.start = start;
1049 	}
1050 
1051 	while (1) {
1052 		struct rb_node *node;
1053 		bool merged = false;
1054 		struct discard_cmd *tdc = NULL;
1055 
1056 		if (prev_dc) {
1057 			di.lstart = prev_dc->lstart + prev_dc->len;
1058 			if (di.lstart < lstart)
1059 				di.lstart = lstart;
1060 			if (di.lstart >= end)
1061 				break;
1062 
1063 			if (!next_dc || next_dc->lstart > end)
1064 				di.len = end - di.lstart;
1065 			else
1066 				di.len = next_dc->lstart - di.lstart;
1067 			di.start = start + di.lstart - lstart;
1068 		}
1069 
1070 		if (!di.len)
1071 			goto next;
1072 
1073 		if (prev_dc && prev_dc->state == D_PREP &&
1074 			prev_dc->bdev == bdev &&
1075 			__is_discard_back_mergeable(&di, &prev_dc->di)) {
1076 			prev_dc->di.len += di.len;
1077 			dcc->undiscard_blks += di.len;
1078 			__relocate_discard_cmd(dcc, prev_dc);
1079 			di = prev_dc->di;
1080 			tdc = prev_dc;
1081 			merged = true;
1082 		}
1083 
1084 		if (next_dc && next_dc->state == D_PREP &&
1085 			next_dc->bdev == bdev &&
1086 			__is_discard_front_mergeable(&di, &next_dc->di)) {
1087 			next_dc->di.lstart = di.lstart;
1088 			next_dc->di.len += di.len;
1089 			next_dc->di.start = di.start;
1090 			dcc->undiscard_blks += di.len;
1091 			__relocate_discard_cmd(dcc, next_dc);
1092 			if (tdc)
1093 				__remove_discard_cmd(sbi, tdc);
1094 			merged = true;
1095 		}
1096 
1097 		if (!merged) {
1098 			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
1099 							di.len, NULL, NULL);
1100 		}
1101  next:
1102 		prev_dc = next_dc;
1103 		if (!prev_dc)
1104 			break;
1105 
1106 		node = rb_next(&prev_dc->rb_node);
1107 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1108 	}
1109 
1110 	mutex_unlock(&dcc->cmd_lock);
1111 }
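
/*
 * The loop above walks the rb-tree span overlapping the new range and
 * back-merges with the previous command or front-merges with the next
 * one where possible, so adjacent discards collapse into one request.
 */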
1112 
1113 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
1114 		struct block_device *bdev, block_t blkstart, block_t blklen)
1115 {
1116 	block_t lblkstart = blkstart;
1117 
1118 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
1119 
1120 	if (sbi->s_ndevs) {
1121 		int devi = f2fs_target_device_index(sbi, blkstart);
1122 
1123 		blkstart -= FDEV(devi).start_blk;
1124 	}
1125 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1126 	return 0;
1127 }
1128 
1129 static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
1130 					struct discard_policy *dpolicy,
1131 					unsigned int start, unsigned int end)
1132 {
1133 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1134 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1135 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1136 	struct discard_cmd *dc;
1137 	struct blk_plug plug;
1138 	int issued;
1139 
1140 next:
1141 	issued = 0;
1142 
1143 	mutex_lock(&dcc->cmd_lock);
1144 	f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
1145 
1146 	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
1147 					NULL, start,
1148 					(struct rb_entry **)&prev_dc,
1149 					(struct rb_entry **)&next_dc,
1150 					&insert_p, &insert_parent, true);
1151 	if (!dc)
1152 		dc = next_dc;
1153 
1154 	blk_start_plug(&plug);
1155 
1156 	while (dc && dc->lstart <= end) {
1157 		struct rb_node *node;
1158 
1159 		if (dc->len < dpolicy->granularity)
1160 			goto skip;
1161 
1162 		if (dc->state != D_PREP) {
1163 			list_move_tail(&dc->list, &dcc->fstrim_list);
1164 			goto skip;
1165 		}
1166 
1167 		__submit_discard_cmd(sbi, dpolicy, dc);
1168 
1169 		if (++issued >= dpolicy->max_requests) {
1170 			start = dc->lstart + dc->len;
1171 
1172 			blk_finish_plug(&plug);
1173 			mutex_unlock(&dcc->cmd_lock);
1174 
1175 			schedule();
1176 
1177 			goto next;
1178 		}
1179 skip:
1180 		node = rb_next(&dc->rb_node);
1181 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1182 
1183 		if (fatal_signal_pending(current))
1184 			break;
1185 	}
1186 
1187 	blk_finish_plug(&plug);
1188 	mutex_unlock(&dcc->cmd_lock);
1189 }
1190 
1191 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1192 					struct discard_policy *dpolicy)
1193 {
1194 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1195 	struct list_head *pend_list;
1196 	struct discard_cmd *dc, *tmp;
1197 	struct blk_plug plug;
1198 	int i, iter = 0, issued = 0;
1199 	bool io_interrupted = false;
1200 
1201 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1202 		if (i + 1 < dpolicy->granularity)
1203 			break;
1204 		pend_list = &dcc->pend_list[i];
1205 
1206 		mutex_lock(&dcc->cmd_lock);
1207 		f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
1208 		blk_start_plug(&plug);
1209 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1210 			f2fs_bug_on(sbi, dc->state != D_PREP);
1211 
1212 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1213 								!is_idle(sbi)) {
1214 				io_interrupted = true;
1215 				goto skip;
1216 			}
1217 
1218 			__submit_discard_cmd(sbi, dpolicy, dc);
1219 			issued++;
1220 skip:
1221 			if (++iter >= dpolicy->max_requests)
1222 				break;
1223 		}
1224 		blk_finish_plug(&plug);
1225 		mutex_unlock(&dcc->cmd_lock);
1226 
1227 		if (iter >= dpolicy->max_requests)
1228 			break;
1229 	}
1230 
1231 	if (!issued && io_interrupted)
1232 		issued = -1;
1233 
1234 	return issued;
1235 }
1236 
1237 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1238 {
1239 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1240 	struct list_head *pend_list;
1241 	struct discard_cmd *dc, *tmp;
1242 	int i;
1243 	bool dropped = false;
1244 
1245 	mutex_lock(&dcc->cmd_lock);
1246 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1247 		pend_list = &dcc->pend_list[i];
1248 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1249 			f2fs_bug_on(sbi, dc->state != D_PREP);
1250 			__remove_discard_cmd(sbi, dc);
1251 			dropped = true;
1252 		}
1253 	}
1254 	mutex_unlock(&dcc->cmd_lock);
1255 
1256 	return dropped;
1257 }
1258 
1259 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1260 							struct discard_cmd *dc)
1261 {
1262 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1263 	unsigned int len = 0;
1264 
1265 	wait_for_completion_io(&dc->wait);
1266 	mutex_lock(&dcc->cmd_lock);
1267 	f2fs_bug_on(sbi, dc->state != D_DONE);
1268 	dc->ref--;
1269 	if (!dc->ref) {
1270 		if (!dc->error)
1271 			len = dc->len;
1272 		__remove_discard_cmd(sbi, dc);
1273 	}
1274 	mutex_unlock(&dcc->cmd_lock);
1275 
1276 	return len;
1277 }
1278 
1279 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1280 						struct discard_policy *dpolicy,
1281 						block_t start, block_t end)
1282 {
1283 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1284 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1285 					&(dcc->fstrim_list) : &(dcc->wait_list);
1286 	struct discard_cmd *dc, *tmp;
1287 	bool need_wait;
1288 	unsigned int trimmed = 0;
1289 
1290 next:
1291 	need_wait = false;
1292 
1293 	mutex_lock(&dcc->cmd_lock);
1294 	list_for_each_entry_safe(dc, tmp, wait_list, list) {
1295 		if (dc->lstart + dc->len <= start || end <= dc->lstart)
1296 			continue;
1297 		if (dc->len < dpolicy->granularity)
1298 			continue;
1299 		if (dc->state == D_DONE && !dc->ref) {
1300 			wait_for_completion_io(&dc->wait);
1301 			if (!dc->error)
1302 				trimmed += dc->len;
1303 			__remove_discard_cmd(sbi, dc);
1304 		} else {
1305 			dc->ref++;
1306 			need_wait = true;
1307 			break;
1308 		}
1309 	}
1310 	mutex_unlock(&dcc->cmd_lock);
1311 
1312 	if (need_wait) {
1313 		trimmed += __wait_one_discard_bio(sbi, dc);
1314 		goto next;
1315 	}
1316 
1317 	return trimmed;
1318 }
1319 
1320 static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1321 						struct discard_policy *dpolicy)
1322 {
1323 	__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1324 }
1325 
1326 /* This should be covered by the global lock, &sit_i->sentry_lock */
1327 void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1328 {
1329 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1330 	struct discard_cmd *dc;
1331 	bool need_wait = false;
1332 
1333 	mutex_lock(&dcc->cmd_lock);
1334 	dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
1335 	if (dc) {
1336 		if (dc->state == D_PREP) {
1337 			__punch_discard_cmd(sbi, dc, blkaddr);
1338 		} else {
1339 			dc->ref++;
1340 			need_wait = true;
1341 		}
1342 	}
1343 	mutex_unlock(&dcc->cmd_lock);
1344 
1345 	if (need_wait)
1346 		__wait_one_discard_bio(sbi, dc);
1347 }
1348 
1349 void stop_discard_thread(struct f2fs_sb_info *sbi)
1350 {
1351 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1352 
1353 	if (dcc && dcc->f2fs_issue_discard) {
1354 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1355 
1356 		dcc->f2fs_issue_discard = NULL;
1357 		kthread_stop(discard_thread);
1358 	}
1359 }
1360 
1361 /* This comes from f2fs_put_super */
1362 bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
1363 {
1364 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1365 	struct discard_policy dpolicy;
1366 	bool dropped;
1367 
1368 	init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity);
1369 	__issue_discard_cmd(sbi, &dpolicy);
1370 	dropped = __drop_discard_cmd(sbi);
1371 	__wait_all_discard_cmd(sbi, &dpolicy);
1372 
1373 	return dropped;
1374 }
1375 
1376 static int issue_discard_thread(void *data)
1377 {
1378 	struct f2fs_sb_info *sbi = data;
1379 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1380 	wait_queue_head_t *q = &dcc->discard_wait_queue;
1381 	struct discard_policy dpolicy;
1382 	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
1383 	int issued;
1384 
1385 	set_freezable();
1386 
1387 	do {
1388 		init_discard_policy(&dpolicy, DPOLICY_BG,
1389 					dcc->discard_granularity);
1390 
1391 		wait_event_interruptible_timeout(*q,
1392 				kthread_should_stop() || freezing(current) ||
1393 				dcc->discard_wake,
1394 				msecs_to_jiffies(wait_ms));
1395 		if (try_to_freeze())
1396 			continue;
1397 		if (kthread_should_stop())
1398 			return 0;
1399 
1400 		if (dcc->discard_wake) {
1401 			dcc->discard_wake = 0;
1402 			if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
1403 				init_discard_policy(&dpolicy,
1404 							DPOLICY_FORCE, 1);
1405 		}
1406 
1407 		sb_start_intwrite(sbi->sb);
1408 
1409 		issued = __issue_discard_cmd(sbi, &dpolicy);
1410 		if (issued) {
1411 			__wait_all_discard_cmd(sbi, &dpolicy);
1412 			wait_ms = dpolicy.min_interval;
1413 		} else {
1414 			wait_ms = dpolicy.max_interval;
1415 		}
1416 
1417 		sb_end_intwrite(sbi->sb);
1418 
1419 	} while (!kthread_should_stop());
1420 	return 0;
1421 }
1422 
1423 #ifdef CONFIG_BLK_DEV_ZONED
1424 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1425 		struct block_device *bdev, block_t blkstart, block_t blklen)
1426 {
1427 	sector_t sector, nr_sects;
1428 	block_t lblkstart = blkstart;
1429 	int devi = 0;
1430 
1431 	if (sbi->s_ndevs) {
1432 		devi = f2fs_target_device_index(sbi, blkstart);
1433 		blkstart -= FDEV(devi).start_blk;
1434 	}
1435 
1436 	/*
1437 	 * We need to know the type of the zone: for conventional zones,
1438 	 * use regular discard if the drive supports it. For sequential
1439 	 * zones, reset the zone write pointer.
1440 	 */
1441 	switch (get_blkz_type(sbi, bdev, blkstart)) {
1442 
1443 	case BLK_ZONE_TYPE_CONVENTIONAL:
1444 		if (!blk_queue_discard(bdev_get_queue(bdev)))
1445 			return 0;
1446 		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1447 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
1448 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
1449 		sector = SECTOR_FROM_BLOCK(blkstart);
1450 		nr_sects = SECTOR_FROM_BLOCK(blklen);
1451 
1452 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
1453 				nr_sects != bdev_zone_sectors(bdev)) {
1454 			f2fs_msg(sbi->sb, KERN_INFO,
1455 				"(%d) %s: Unaligned discard attempted (block %x + %x)",
1456 				devi, sbi->s_ndevs ? FDEV(devi).path: "",
1457 				blkstart, blklen);
1458 			return -EIO;
1459 		}
1460 		trace_f2fs_issue_reset_zone(bdev, blkstart);
1461 		return blkdev_reset_zones(bdev, sector,
1462 					  nr_sects, GFP_NOFS);
1463 	default:
1464 		/* Unknown zone type: broken device? */
1465 		return -EIO;
1466 	}
1467 }
1468 #endif
1469 
1470 static int __issue_discard_async(struct f2fs_sb_info *sbi,
1471 		struct block_device *bdev, block_t blkstart, block_t blklen)
1472 {
1473 #ifdef CONFIG_BLK_DEV_ZONED
1474 	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
1475 				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
1476 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1477 #endif
1478 	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
1479 }
1480 
1481 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
1482 				block_t blkstart, block_t blklen)
1483 {
1484 	sector_t start = blkstart, len = 0;
1485 	struct block_device *bdev;
1486 	struct seg_entry *se;
1487 	unsigned int offset;
1488 	block_t i;
1489 	int err = 0;
1490 
1491 	bdev = f2fs_target_device(sbi, blkstart, NULL);
1492 
1493 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
1494 		if (i != start) {
1495 			struct block_device *bdev2 =
1496 				f2fs_target_device(sbi, i, NULL);
1497 
1498 			if (bdev2 != bdev) {
1499 				err = __issue_discard_async(sbi, bdev,
1500 						start, len);
1501 				if (err)
1502 					return err;
1503 				bdev = bdev2;
1504 				start = i;
1505 				len = 0;
1506 			}
1507 		}
1508 
1509 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
1510 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
1511 
1512 		if (!f2fs_test_and_set_bit(offset, se->discard_map))
1513 			sbi->discard_blks--;
1514 	}
1515 
1516 	if (len)
1517 		err = __issue_discard_async(sbi, bdev, start, len);
1518 	return err;
1519 }
1520 
1521 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1522 							bool check_only)
1523 {
1524 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1525 	int max_blocks = sbi->blocks_per_seg;
1526 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
1527 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1528 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1529 	unsigned long *discard_map = (unsigned long *)se->discard_map;
1530 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
1531 	unsigned int start = 0, end = -1;
1532 	bool force = (cpc->reason & CP_DISCARD);
1533 	struct discard_entry *de = NULL;
1534 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
1535 	int i;
1536 
1537 	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
1538 		return false;
1539 
1540 	if (!force) {
1541 		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
1542 			SM_I(sbi)->dcc_info->nr_discards >=
1543 				SM_I(sbi)->dcc_info->max_discards)
1544 			return false;
1545 	}
1546 
1547 	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1548 	for (i = 0; i < entries; i++)
1549 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
1550 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
1551 
1552 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
1553 				SM_I(sbi)->dcc_info->max_discards) {
1554 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1555 		if (start >= max_blocks)
1556 			break;
1557 
1558 		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
1559 		if (force && start && end != max_blocks
1560 					&& (end - start) < cpc->trim_minlen)
1561 			continue;
1562 
1563 		if (check_only)
1564 			return true;
1565 
1566 		if (!de) {
1567 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
1568 								GFP_F2FS_ZERO);
1569 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1570 			list_add_tail(&de->list, head);
1571 		}
1572 
1573 		for (i = start; i < end; i++)
1574 			__set_bit_le(i, (void *)de->discard_map);
1575 
1576 		SM_I(sbi)->dcc_info->nr_discards += end - start;
1577 	}
1578 	return false;
1579 }
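
/*
 * Reading the dmap formula above: in CP_DISCARD (force) mode it picks
 * blocks invalid in both the checkpoint and discard maps; otherwise it
 * picks blocks that were valid at the last checkpoint but have been
 * invalidated since ((cur ^ ckpt) & ckpt).
 */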
1580 
1581 void release_discard_addrs(struct f2fs_sb_info *sbi)
1582 {
1583 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
1584 	struct discard_entry *entry, *this;
1585 
1586 	/* drop caches */
1587 	list_for_each_entry_safe(entry, this, head, list) {
1588 		list_del(&entry->list);
1589 		kmem_cache_free(discard_entry_slab, entry);
1590 	}
1591 }
1592 
1593 /*
1594  * clear_prefree_segments() should be called after the checkpoint is done.
1595  */
1596 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
1597 {
1598 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1599 	unsigned int segno;
1600 
1601 	mutex_lock(&dirty_i->seglist_lock);
1602 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
1603 		__set_test_and_free(sbi, segno);
1604 	mutex_unlock(&dirty_i->seglist_lock);
1605 }
1606 
1607 void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1608 {
1609 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1610 	struct list_head *head = &dcc->entry_list;
1611 	struct discard_entry *entry, *this;
1612 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1613 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
1614 	unsigned int start = 0, end = -1;
1615 	unsigned int secno, start_segno;
1616 	bool force = (cpc->reason & CP_DISCARD);
1617 
1618 	mutex_lock(&dirty_i->seglist_lock);
1619 
1620 	while (1) {
1621 		int i;
1622 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
1623 		if (start >= MAIN_SEGS(sbi))
1624 			break;
1625 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
1626 								start + 1);
1627 
1628 		for (i = start; i < end; i++)
1629 			clear_bit(i, prefree_map);
1630 
1631 		dirty_i->nr_dirty[PRE] -= end - start;
1632 
1633 		if (!test_opt(sbi, DISCARD))
1634 			continue;
1635 
1636 		if (force && start >= cpc->trim_start &&
1637 					(end - 1) <= cpc->trim_end)
1638 				continue;
1639 
1640 		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
1641 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
1642 				(end - start) << sbi->log_blocks_per_seg);
1643 			continue;
1644 		}
1645 next:
1646 		secno = GET_SEC_FROM_SEG(sbi, start);
1647 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
1648 		if (!IS_CURSEC(sbi, secno) &&
1649 			!get_valid_blocks(sbi, start, true))
1650 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
1651 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
1652 
1653 		start = start_segno + sbi->segs_per_sec;
1654 		if (start < end)
1655 			goto next;
1656 		else
1657 			end = start - 1;
1658 	}
1659 	mutex_unlock(&dirty_i->seglist_lock);
1660 
1661 	/* send small discards */
1662 	list_for_each_entry_safe(entry, this, head, list) {
1663 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
1664 		bool is_valid = test_bit_le(0, entry->discard_map);
1665 
1666 find_next:
1667 		if (is_valid) {
1668 			next_pos = find_next_zero_bit_le(entry->discard_map,
1669 					sbi->blocks_per_seg, cur_pos);
1670 			len = next_pos - cur_pos;
1671 
1672 			if (f2fs_sb_mounted_blkzoned(sbi->sb) ||
1673 			    (force && len < cpc->trim_minlen))
1674 				goto skip;
1675 
1676 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
1677 									len);
1678 			total_len += len;
1679 		} else {
1680 			next_pos = find_next_bit_le(entry->discard_map,
1681 					sbi->blocks_per_seg, cur_pos);
1682 		}
1683 skip:
1684 		cur_pos = next_pos;
1685 		is_valid = !is_valid;
1686 
1687 		if (cur_pos < sbi->blocks_per_seg)
1688 			goto find_next;
1689 
1690 		list_del(&entry->list);
1691 		dcc->nr_discards -= total_len;
1692 		kmem_cache_free(discard_entry_slab, entry);
1693 	}
1694 
1695 	wake_up_discard_thread(sbi, false);
1696 }
1697 
1698 void init_discard_policy(struct discard_policy *dpolicy,
1699 				int discard_type, unsigned int granularity)
1700 {
1701 	/* common policy */
1702 	dpolicy->type = discard_type;
1703 	dpolicy->sync = true;
1704 	dpolicy->granularity = granularity;
1705 
1706 	if (discard_type == DPOLICY_BG) {
1707 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
1708 		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
1709 		dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
1710 		dpolicy->io_aware_gran = MAX_PLIST_NUM;
1711 		dpolicy->io_aware = true;
1712 	} else if (discard_type == DPOLICY_FORCE) {
1713 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
1714 		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
1715 		dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
1716 		dpolicy->io_aware_gran = MAX_PLIST_NUM;
1717 		dpolicy->io_aware = true;
1718 	} else if (discard_type == DPOLICY_FSTRIM) {
1719 		dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
1720 		dpolicy->io_aware_gran = MAX_PLIST_NUM;
1721 		dpolicy->io_aware = false;
1722 	} else if (discard_type == DPOLICY_UMOUNT) {
1723 		dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
1724 		dpolicy->io_aware_gran = MAX_PLIST_NUM;
1725 		dpolicy->io_aware = false;
1726 	}
1727 }
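
/*
 * Policy summary: DPOLICY_BG and DPOLICY_FORCE are interval-driven and
 * io-aware (they yield to foreground I/O); DPOLICY_FSTRIM and
 * DPOLICY_UMOUNT disable io-awareness since their callers wait anyway.
 */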
1728 
1729 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
1730 {
1731 	dev_t dev = sbi->sb->s_bdev->bd_dev;
1732 	struct discard_cmd_control *dcc;
1733 	int err = 0, i;
1734 
1735 	if (SM_I(sbi)->dcc_info) {
1736 		dcc = SM_I(sbi)->dcc_info;
1737 		goto init_thread;
1738 	}
1739 
1740 	dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
1741 	if (!dcc)
1742 		return -ENOMEM;
1743 
1744 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
1745 	INIT_LIST_HEAD(&dcc->entry_list);
1746 	for (i = 0; i < MAX_PLIST_NUM; i++)
1747 		INIT_LIST_HEAD(&dcc->pend_list[i]);
1748 	INIT_LIST_HEAD(&dcc->wait_list);
1749 	INIT_LIST_HEAD(&dcc->fstrim_list);
1750 	mutex_init(&dcc->cmd_lock);
1751 	atomic_set(&dcc->issued_discard, 0);
1752 	atomic_set(&dcc->issing_discard, 0);
1753 	atomic_set(&dcc->discard_cmd_cnt, 0);
1754 	dcc->nr_discards = 0;
1755 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
1756 	dcc->undiscard_blks = 0;
1757 	dcc->root = RB_ROOT;
1758 
1759 	init_waitqueue_head(&dcc->discard_wait_queue);
1760 	SM_I(sbi)->dcc_info = dcc;
1761 init_thread:
1762 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
1763 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
1764 	if (IS_ERR(dcc->f2fs_issue_discard)) {
1765 		err = PTR_ERR(dcc->f2fs_issue_discard);
1766 		kfree(dcc);
1767 		SM_I(sbi)->dcc_info = NULL;
1768 		return err;
1769 	}
1770 
1771 	return err;
1772 }
1773 
1774 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
1775 {
1776 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1777 
1778 	if (!dcc)
1779 		return;
1780 
1781 	stop_discard_thread(sbi);
1782 
1783 	kfree(dcc);
1784 	SM_I(sbi)->dcc_info = NULL;
1785 }
1786 
1787 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
1788 {
1789 	struct sit_info *sit_i = SIT_I(sbi);
1790 
1791 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
1792 		sit_i->dirty_sentries++;
1793 		return false;
1794 	}
1795 
1796 	return true;
1797 }
1798 
1799 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
1800 					unsigned int segno, int modified)
1801 {
1802 	struct seg_entry *se = get_seg_entry(sbi, segno);
1803 	se->type = type;
1804 	if (modified)
1805 		__mark_sit_entry_dirty(sbi, segno);
1806 }
1807 
1808 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
1809 {
1810 	struct seg_entry *se;
1811 	unsigned int segno, offset;
1812 	long int new_vblocks;
1813 	bool exist;
1814 #ifdef CONFIG_F2FS_CHECK_FS
1815 	bool mir_exist;
1816 #endif
1817 
1818 	segno = GET_SEGNO(sbi, blkaddr);
1819 
1820 	se = get_seg_entry(sbi, segno);
1821 	new_vblocks = se->valid_blocks + del;
1822 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1823 
1824 	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
1825 				(new_vblocks > sbi->blocks_per_seg)));
1826 
1827 	se->valid_blocks = new_vblocks;
1828 	se->mtime = get_mtime(sbi);
1829 	SIT_I(sbi)->max_mtime = se->mtime;
1830 
1831 	/* Update valid block bitmap */
1832 	if (del > 0) {
1833 		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
1834 #ifdef CONFIG_F2FS_CHECK_FS
1835 		mir_exist = f2fs_test_and_set_bit(offset,
1836 						se->cur_valid_map_mir);
1837 		if (unlikely(exist != mir_exist)) {
1838 			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1839 				"when setting bitmap, blk:%u, old bit:%d",
1840 				blkaddr, exist);
1841 			f2fs_bug_on(sbi, 1);
1842 		}
1843 #endif
1844 		if (unlikely(exist)) {
1845 			f2fs_msg(sbi->sb, KERN_ERR,
1846 				"Bitmap was wrongly set, blk:%u", blkaddr);
1847 			f2fs_bug_on(sbi, 1);
1848 			se->valid_blocks--;
1849 			del = 0;
1850 		}
1851 
1852 		if (f2fs_discard_en(sbi) &&
1853 			!f2fs_test_and_set_bit(offset, se->discard_map))
1854 			sbi->discard_blks--;
1855 
1856 		/* don't allow SSR overwrites, to keep the node chain intact */
1857 		if (se->type == CURSEG_WARM_NODE) {
1858 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
1859 				se->ckpt_valid_blocks++;
1860 		}
1861 	} else {
1862 		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
1863 #ifdef CONFIG_F2FS_CHECK_FS
1864 		mir_exist = f2fs_test_and_clear_bit(offset,
1865 						se->cur_valid_map_mir);
1866 		if (unlikely(exist != mir_exist)) {
1867 			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1868 				"when clearing bitmap, blk:%u, old bit:%d",
1869 				blkaddr, exist);
1870 			f2fs_bug_on(sbi, 1);
1871 		}
1872 #endif
1873 		if (unlikely(!exist)) {
1874 			f2fs_msg(sbi->sb, KERN_ERR,
1875 				"Bitmap was wrongly cleared, blk:%u", blkaddr);
1876 			f2fs_bug_on(sbi, 1);
1877 			se->valid_blocks++;
1878 			del = 0;
1879 		}
1880 
1881 		if (f2fs_discard_en(sbi) &&
1882 			f2fs_test_and_clear_bit(offset, se->discard_map))
1883 			sbi->discard_blks++;
1884 	}
1885 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
1886 		se->ckpt_valid_blocks += del;
1887 
1888 	__mark_sit_entry_dirty(sbi, segno);
1889 
1890 	/* update total number of valid blocks to be written in ckpt area */
1891 	SIT_I(sbi)->written_valid_blocks += del;
1892 
1893 	if (sbi->segs_per_sec > 1)
1894 		get_sec_entry(sbi, segno)->valid_blocks += del;
1895 }
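
/*
 * Usage: update_sit_entry(sbi, blkaddr, 1) marks one newly written block
 * valid, del == -1 invalidates one; the mirror-map checks under
 * CONFIG_F2FS_CHECK_FS catch double set/clear bitmap corruption.
 */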
1896 
1897 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
1898 {
1899 	unsigned int segno = GET_SEGNO(sbi, addr);
1900 	struct sit_info *sit_i = SIT_I(sbi);
1901 
1902 	f2fs_bug_on(sbi, addr == NULL_ADDR);
1903 	if (addr == NEW_ADDR)
1904 		return;
1905 
1906 	/* add it into sit main buffer */
1907 	down_write(&sit_i->sentry_lock);
1908 
1909 	update_sit_entry(sbi, addr, -1);
1910 
1911 	/* add it into dirty seglist */
1912 	locate_dirty_segment(sbi, segno);
1913 
1914 	up_write(&sit_i->sentry_lock);
1915 }
1916 
1917 bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
1918 {
1919 	struct sit_info *sit_i = SIT_I(sbi);
1920 	unsigned int segno, offset;
1921 	struct seg_entry *se;
1922 	bool is_cp = false;
1923 
1924 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
1925 		return true;
1926 
1927 	down_read(&sit_i->sentry_lock);
1928 
1929 	segno = GET_SEGNO(sbi, blkaddr);
1930 	se = get_seg_entry(sbi, segno);
1931 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1932 
1933 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
1934 		is_cp = true;
1935 
1936 	up_read(&sit_i->sentry_lock);
1937 
1938 	return is_cp;
1939 }
1940 
1941 /*
1942  * This function must be called with curseg_mutex held.
1943  */
1944 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
1945 					struct f2fs_summary *sum)
1946 {
1947 	struct curseg_info *curseg = CURSEG_I(sbi, type);
1948 	void *addr = curseg->sum_blk;
1949 	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
1950 	memcpy(addr, sum, sizeof(struct f2fs_summary));
1951 }
1952 
1953 /*
1954  * Calculate the number of current summary pages for writing
1955  */
1956 int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
1957 {
1958 	int valid_sum_count = 0;
1959 	int i, sum_in_page;
1960 
1961 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1962 		if (sbi->ckpt->alloc_type[i] == SSR)
1963 			valid_sum_count += sbi->blocks_per_seg;
1964 		else {
1965 			if (for_ra)
1966 				valid_sum_count += le16_to_cpu(
1967 					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
1968 			else
1969 				valid_sum_count += curseg_blkoff(sbi, i);
1970 		}
1971 	}
1972 
1973 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
1974 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
1975 	if (valid_sum_count <= sum_in_page)
1976 		return 1;
1977 	else if ((valid_sum_count - sum_in_page) <=
1978 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
1979 		return 2;
1980 	return 3;
1981 }
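/*
 * Editor's worked example (not part of the original file), assuming 4KB
 * blocks and the on-disk constants from include/linux/f2fs_fs.h
 * (SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5, ENTRIES_IN_SUM = 512, hence
 * SUM_JOURNAL_SIZE = 4096 - 5 - 7 * 512 = 507):
 *
 *	sum_in_page = (4096 - 2 * 507 - 5) / 7 = 439 entries
 *
 * so the first compacted page holds up to 439 summaries after the two
 * journals, each later page holds (4096 - 5) / 7 = 584 more, and the
 * three data logs (at most 3 * 512 = 1536 entries) never need more
 * than three pages.
 */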
1982 
1983 /*
1984  * Caller should put this summary page
1985  */
1986 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
1987 {
1988 	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
1989 }
1990 
1991 void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
1992 {
1993 	struct page *page = grab_meta_page(sbi, blk_addr);
1994 
1995 	memcpy(page_address(page), src, PAGE_SIZE);
1996 	set_page_dirty(page);
1997 	f2fs_put_page(page, 1);
1998 }
1999 
2000 static void write_sum_page(struct f2fs_sb_info *sbi,
2001 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2002 {
2003 	update_meta_page(sbi, (void *)sum_blk, blk_addr);
2004 }
2005 
2006 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2007 						int type, block_t blk_addr)
2008 {
2009 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2010 	struct page *page = grab_meta_page(sbi, blk_addr);
2011 	struct f2fs_summary_block *src = curseg->sum_blk;
2012 	struct f2fs_summary_block *dst;
2013 
2014 	dst = (struct f2fs_summary_block *)page_address(page);
2015 
2016 	mutex_lock(&curseg->curseg_mutex);
2017 
2018 	down_read(&curseg->journal_rwsem);
2019 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2020 	up_read(&curseg->journal_rwsem);
2021 
2022 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2023 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2024 
2025 	mutex_unlock(&curseg->curseg_mutex);
2026 
2027 	set_page_dirty(page);
2028 	f2fs_put_page(page, 1);
2029 }
2030 
2031 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
2032 {
2033 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2034 	unsigned int segno = curseg->segno + 1;
2035 	struct free_segmap_info *free_i = FREE_I(sbi);
2036 
2037 	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2038 		return !test_bit(segno, free_i->free_segmap);
2039 	return 0;
2040 }
2041 
2042 /*
2043  * Find a new segment in the free segment bitmap, searching in the
2044  * right order. This function must succeed; otherwise it hits a BUG.
2045  */
2046 static void get_new_segment(struct f2fs_sb_info *sbi,
2047 			unsigned int *newseg, bool new_sec, int dir)
2048 {
2049 	struct free_segmap_info *free_i = FREE_I(sbi);
2050 	unsigned int segno, secno, zoneno;
2051 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2052 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2053 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2054 	unsigned int left_start = hint;
2055 	bool init = true;
2056 	int go_left = 0;
2057 	int i;
2058 
2059 	spin_lock(&free_i->segmap_lock);
2060 
2061 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2062 		segno = find_next_zero_bit(free_i->free_segmap,
2063 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2064 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2065 			goto got_it;
2066 	}
2067 find_other_zone:
2068 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2069 	if (secno >= MAIN_SECS(sbi)) {
2070 		if (dir == ALLOC_RIGHT) {
2071 			secno = find_next_zero_bit(free_i->free_secmap,
2072 							MAIN_SECS(sbi), 0);
2073 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2074 		} else {
2075 			go_left = 1;
2076 			left_start = hint - 1;
2077 		}
2078 	}
2079 	if (go_left == 0)
2080 		goto skip_left;
2081 
2082 	while (test_bit(left_start, free_i->free_secmap)) {
2083 		if (left_start > 0) {
2084 			left_start--;
2085 			continue;
2086 		}
2087 		left_start = find_next_zero_bit(free_i->free_secmap,
2088 							MAIN_SECS(sbi), 0);
2089 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2090 		break;
2091 	}
2092 	secno = left_start;
2093 skip_left:
2094 	segno = GET_SEG_FROM_SEC(sbi, secno);
2095 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2096 
2097 	/* give up on finding another zone */
2098 	if (!init)
2099 		goto got_it;
2100 	if (sbi->secs_per_zone == 1)
2101 		goto got_it;
2102 	if (zoneno == old_zoneno)
2103 		goto got_it;
2104 	if (dir == ALLOC_LEFT) {
2105 		if (!go_left && zoneno + 1 >= total_zones)
2106 			goto got_it;
2107 		if (go_left && zoneno == 0)
2108 			goto got_it;
2109 	}
2110 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2111 		if (CURSEG_I(sbi, i)->zone == zoneno)
2112 			break;
2113 
2114 	if (i < NR_CURSEG_TYPE) {
2115 		/* zone is in use, try another */
2116 		if (go_left)
2117 			hint = zoneno * sbi->secs_per_zone - 1;
2118 		else if (zoneno + 1 >= total_zones)
2119 			hint = 0;
2120 		else
2121 			hint = (zoneno + 1) * sbi->secs_per_zone;
2122 		init = false;
2123 		goto find_other_zone;
2124 	}
2125 got_it:
2126 	/* set it as dirty segment in free segmap */
2127 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2128 	__set_inuse(sbi, segno);
2129 	*newseg = segno;
2130 	spin_unlock(&free_i->segmap_lock);
2131 }
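/*
 * Editor's note (not part of the original file): in rough terms,
 * ALLOC_RIGHT scans the free section map upward from the hint and wraps
 * to 0, while ALLOC_LEFT walks downward from the hint. If the candidate
 * lands in a zone already used by another curseg, the search retries
 * once with an adjusted hint (init == false) and then accepts whatever
 * it finds, so the zone separation is best-effort only.
 */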
2132 
2133 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2134 {
2135 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2136 	struct summary_footer *sum_footer;
2137 
2138 	curseg->segno = curseg->next_segno;
2139 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2140 	curseg->next_blkoff = 0;
2141 	curseg->next_segno = NULL_SEGNO;
2142 
2143 	sum_footer = &(curseg->sum_blk->footer);
2144 	memset(sum_footer, 0, sizeof(struct summary_footer));
2145 	if (IS_DATASEG(type))
2146 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2147 	if (IS_NODESEG(type))
2148 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2149 	__set_sit_entry_type(sbi, type, curseg->segno, modified);
2150 }
2151 
2152 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2153 {
2154 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
2155 	if (sbi->segs_per_sec != 1)
2156 		return CURSEG_I(sbi, type)->segno;
2157 
2158 	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
2159 		return 0;
2160 
2161 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2162 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2163 	return CURSEG_I(sbi, type)->segno;
2164 }
2165 
2166 /*
2167  * Allocate a current working segment.
2168  * This function always allocates a free segment in LFS manner.
2169  */
2170 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2171 {
2172 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2173 	unsigned int segno = curseg->segno;
2174 	int dir = ALLOC_LEFT;
2175 
2176 	write_sum_page(sbi, curseg->sum_blk,
2177 				GET_SUM_BLOCK(sbi, segno));
2178 	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
2179 		dir = ALLOC_RIGHT;
2180 
2181 	if (test_opt(sbi, NOHEAP))
2182 		dir = ALLOC_RIGHT;
2183 
2184 	segno = __get_next_segno(sbi, type);
2185 	get_new_segment(sbi, &segno, new_sec, dir);
2186 	curseg->next_segno = segno;
2187 	reset_curseg(sbi, type, 1);
2188 	curseg->alloc_type = LFS;
2189 }
2190 
2191 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
2192 			struct curseg_info *seg, block_t start)
2193 {
2194 	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
2195 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2196 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2197 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2198 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2199 	int i, pos;
2200 
2201 	for (i = 0; i < entries; i++)
2202 		target_map[i] = ckpt_map[i] | cur_map[i];
2203 
2204 	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2205 
2206 	seg->next_blkoff = pos;
2207 }
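/*
 * Editor's worked example (not part of the original file) for the map
 * merge above, shown on a 4-bit slice in the reversed bit order used by
 * f2fs_set_bit():
 *
 *	cur_valid_map  : 1010	blocks 0 and 2 are valid now
 *	ckpt_valid_map : 0110	blocks 1 and 2 were valid at checkpoint
 *	target_map     : 1110	union: only block 3 is reusable
 *
 * so __find_rev_next_zero_bit() from offset 0 returns 3: SSR may only
 * write into blocks that are free in both views.
 */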
2208 
2209 /*
2210  * If a segment is written in LFS manner, the next block offset is simply
2211  * obtained by increasing the current block offset. If a segment is written
2212  * in SSR manner, the next offset is obtained by calling __next_free_blkoff().
2213  */
2214 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2215 				struct curseg_info *seg)
2216 {
2217 	if (seg->alloc_type == SSR)
2218 		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
2219 	else
2220 		seg->next_blkoff++;
2221 }
2222 
2223 /*
2224  * This function always allocates a used segment (from the dirty seglist) in
2225  * SSR manner, so it has to recover the existing summaries of its valid blocks.
2226  */
2227 static void change_curseg(struct f2fs_sb_info *sbi, int type)
2228 {
2229 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2230 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2231 	unsigned int new_segno = curseg->next_segno;
2232 	struct f2fs_summary_block *sum_node;
2233 	struct page *sum_page;
2234 
2235 	write_sum_page(sbi, curseg->sum_blk,
2236 				GET_SUM_BLOCK(sbi, curseg->segno));
2237 	__set_test_and_inuse(sbi, new_segno);
2238 
2239 	mutex_lock(&dirty_i->seglist_lock);
2240 	__remove_dirty_segment(sbi, new_segno, PRE);
2241 	__remove_dirty_segment(sbi, new_segno, DIRTY);
2242 	mutex_unlock(&dirty_i->seglist_lock);
2243 
2244 	reset_curseg(sbi, type, 1);
2245 	curseg->alloc_type = SSR;
2246 	__next_free_blkoff(sbi, curseg, 0);
2247 
2248 	sum_page = get_sum_page(sbi, new_segno);
2249 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2250 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2251 	f2fs_put_page(sum_page, 1);
2252 }
2253 
2254 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
2255 {
2256 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2257 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2258 	unsigned segno = NULL_SEGNO;
2259 	int i, cnt;
2260 	bool reversed = false;
2261 
2262 	/* need_SSR() has already forced us to do this */
2263 	if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
2264 		curseg->next_segno = segno;
2265 		return 1;
2266 	}
2267 
2268 	/* For node segments, let's do SSR more intensively */
2269 	if (IS_NODESEG(type)) {
2270 		if (type >= CURSEG_WARM_NODE) {
2271 			reversed = true;
2272 			i = CURSEG_COLD_NODE;
2273 		} else {
2274 			i = CURSEG_HOT_NODE;
2275 		}
2276 		cnt = NR_CURSEG_NODE_TYPE;
2277 	} else {
2278 		if (type >= CURSEG_WARM_DATA) {
2279 			reversed = true;
2280 			i = CURSEG_COLD_DATA;
2281 		} else {
2282 			i = CURSEG_HOT_DATA;
2283 		}
2284 		cnt = NR_CURSEG_DATA_TYPE;
2285 	}
2286 
2287 	for (; cnt-- > 0; reversed ? i-- : i++) {
2288 		if (i == type)
2289 			continue;
2290 		if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
2291 			curseg->next_segno = segno;
2292 			return 1;
2293 		}
2294 	}
2295 	return 0;
2296 }
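/*
 * Editor's note (not part of the original file): for example, with
 * type == CURSEG_WARM_NODE the reversed walk above tries COLD_NODE
 * first, skips WARM_NODE itself, then tries HOT_NODE; with
 * type == CURSEG_HOT_DATA it walks the HOT -> WARM -> COLD data logs
 * in increasing order, again skipping i == type.
 */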
2297 
2298 /*
2299  * Flush out the current segment and replace it with a new one.
2300  * This function must succeed; otherwise it hits a BUG.
2301  */
2302 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2303 						int type, bool force)
2304 {
2305 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2306 
2307 	if (force)
2308 		new_curseg(sbi, type, true);
2309 	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2310 					type == CURSEG_WARM_NODE)
2311 		new_curseg(sbi, type, false);
2312 	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
2313 		new_curseg(sbi, type, false);
2314 	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
2315 		change_curseg(sbi, type);
2316 	else
2317 		new_curseg(sbi, type, false);
2318 
2319 	stat_inc_seg_type(sbi, curseg);
2320 }
2321 
2322 void allocate_new_segments(struct f2fs_sb_info *sbi)
2323 {
2324 	struct curseg_info *curseg;
2325 	unsigned int old_segno;
2326 	int i;
2327 
2328 	down_write(&SIT_I(sbi)->sentry_lock);
2329 
2330 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2331 		curseg = CURSEG_I(sbi, i);
2332 		old_segno = curseg->segno;
2333 		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
2334 		locate_dirty_segment(sbi, old_segno);
2335 	}
2336 
2337 	up_write(&SIT_I(sbi)->sentry_lock);
2338 }
2339 
2340 static const struct segment_allocation default_salloc_ops = {
2341 	.allocate_segment = allocate_segment_by_default,
2342 };
2343 
2344 bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2345 {
2346 	__u64 trim_start = cpc->trim_start;
2347 	bool has_candidate = false;
2348 
2349 	down_write(&SIT_I(sbi)->sentry_lock);
2350 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2351 		if (add_discard_addrs(sbi, cpc, true)) {
2352 			has_candidate = true;
2353 			break;
2354 		}
2355 	}
2356 	up_write(&SIT_I(sbi)->sentry_lock);
2357 
2358 	cpc->trim_start = trim_start;
2359 	return has_candidate;
2360 }
2361 
2362 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2363 {
2364 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
2365 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
2366 	unsigned int start_segno, end_segno, cur_segno;
2367 	block_t start_block, end_block;
2368 	struct cp_control cpc;
2369 	struct discard_policy dpolicy;
2370 	unsigned long long trimmed = 0;
2371 	int err = 0;
2372 
2373 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
2374 		return -EINVAL;
2375 
2376 	if (end <= MAIN_BLKADDR(sbi))
2377 		goto out;
2378 
2379 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2380 		f2fs_msg(sbi->sb, KERN_WARNING,
2381 			"Found FS corruption, run fsck to fix.");
2382 		goto out;
2383 	}
2384 
2385 	/* start/end segment number in main_area */
2386 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
2387 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
2388 						GET_SEGNO(sbi, end);
2389 
2390 	cpc.reason = CP_DISCARD;
2391 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
2392 
2393 	/* do checkpoint to issue discard commands safely */
2394 	for (cur_segno = start_segno; cur_segno <= end_segno;
2395 					cur_segno = cpc.trim_end + 1) {
2396 		cpc.trim_start = cur_segno;
2397 
2398 		if (sbi->discard_blks == 0)
2399 			break;
2400 		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
2401 			cpc.trim_end = end_segno;
2402 		else
2403 			cpc.trim_end = min_t(unsigned int,
2404 				rounddown(cur_segno +
2405 				BATCHED_TRIM_SEGMENTS(sbi),
2406 				sbi->segs_per_sec) - 1, end_segno);
2407 
2408 		mutex_lock(&sbi->gc_mutex);
2409 		err = write_checkpoint(sbi, &cpc);
2410 		mutex_unlock(&sbi->gc_mutex);
2411 		if (err)
2412 			break;
2413 
2414 		schedule();
2415 	}
2416 
2417 	start_block = START_BLOCK(sbi, start_segno);
2418 	end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
2419 
2420 	init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
2421 	__issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
2422 	trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
2423 					start_block, end_block);
2424 out:
2425 	range->len = F2FS_BLK_TO_BYTES(trimmed);
2426 	return err;
2427 }
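/*
 * Editor's sketch (not part of the original file): f2fs_trim_fs() backs
 * the generic FITRIM ioctl, so it is typically reached from userspace
 * via fstrim(8) or directly, e.g.:
 *
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,	// whole filesystem
 *		.minlen = 0,		// raised to at least one block above
 *	};
 *	ioctl(fd, FITRIM, &range);	// fd: any open file on the f2fs mount
 *	// on success, range.len is rewritten with the bytes trimmed
 */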
2428 
2429 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
2430 {
2431 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2432 	if (curseg->next_blkoff < sbi->blocks_per_seg)
2433 		return true;
2434 	return false;
2435 }
2436 
2437 int rw_hint_to_seg_type(enum rw_hint hint)
2438 {
2439 	switch (hint) {
2440 	case WRITE_LIFE_SHORT:
2441 		return CURSEG_HOT_DATA;
2442 	case WRITE_LIFE_EXTREME:
2443 		return CURSEG_COLD_DATA;
2444 	default:
2445 		return CURSEG_WARM_DATA;
2446 	}
2447 }
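/*
 * Editor's sketch (not part of the original file): the hint consumed
 * above is the per-file write-life hint, settable from userspace on
 * kernels that support F_SET_RW_HINT (v4.13+):
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *
 *	uint64_t hint = RWH_WRITE_LIFE_SHORT;	// lands in CURSEG_HOT_DATA
 *	fcntl(fd, F_SET_RW_HINT, &hint);	// fd: file on f2fs
 */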
2448 
2449 static int __get_segment_type_2(struct f2fs_io_info *fio)
2450 {
2451 	if (fio->type == DATA)
2452 		return CURSEG_HOT_DATA;
2453 	else
2454 		return CURSEG_HOT_NODE;
2455 }
2456 
2457 static int __get_segment_type_4(struct f2fs_io_info *fio)
2458 {
2459 	if (fio->type == DATA) {
2460 		struct inode *inode = fio->page->mapping->host;
2461 
2462 		if (S_ISDIR(inode->i_mode))
2463 			return CURSEG_HOT_DATA;
2464 		else
2465 			return CURSEG_COLD_DATA;
2466 	} else {
2467 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
2468 			return CURSEG_WARM_NODE;
2469 		else
2470 			return CURSEG_COLD_NODE;
2471 	}
2472 }
2473 
2474 static int __get_segment_type_6(struct f2fs_io_info *fio)
2475 {
2476 	if (fio->type == DATA) {
2477 		struct inode *inode = fio->page->mapping->host;
2478 
2479 		if (is_cold_data(fio->page) || file_is_cold(inode))
2480 			return CURSEG_COLD_DATA;
2481 		if (is_inode_flag_set(inode, FI_HOT_DATA))
2482 			return CURSEG_HOT_DATA;
2483 		return rw_hint_to_seg_type(inode->i_write_hint);
2484 	} else {
2485 		if (IS_DNODE(fio->page))
2486 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
2487 						CURSEG_HOT_NODE;
2488 		return CURSEG_COLD_NODE;
2489 	}
2490 }
2491 
2492 static int __get_segment_type(struct f2fs_io_info *fio)
2493 {
2494 	int type = 0;
2495 
2496 	switch (fio->sbi->active_logs) {
2497 	case 2:
2498 		type = __get_segment_type_2(fio);
2499 		break;
2500 	case 4:
2501 		type = __get_segment_type_4(fio);
2502 		break;
2503 	case 6:
2504 		type = __get_segment_type_6(fio);
2505 		break;
2506 	default:
2507 		f2fs_bug_on(fio->sbi, true);
2508 	}
2509 
2510 	if (IS_HOT(type))
2511 		fio->temp = HOT;
2512 	else if (IS_WARM(type))
2513 		fio->temp = WARM;
2514 	else
2515 		fio->temp = COLD;
2516 	return type;
2517 }
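/*
 * Editor's summary (not part of the original file) of the three
 * policies above, selected by the active_logs mount option:
 *
 *	logs	DATA pages			NODE pages
 *	2	HOT				HOT
 *	4	HOT (dirs) / COLD (files)	WARM (cold dnodes) / COLD
 *	6	HOT/WARM/COLD by flags+hint	HOT/WARM (dnodes) / COLD
 */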
2518 
2519 void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
2520 		block_t old_blkaddr, block_t *new_blkaddr,
2521 		struct f2fs_summary *sum, int type,
2522 		struct f2fs_io_info *fio, bool add_list)
2523 {
2524 	struct sit_info *sit_i = SIT_I(sbi);
2525 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2526 
2527 	down_read(&SM_I(sbi)->curseg_lock);
2528 
2529 	mutex_lock(&curseg->curseg_mutex);
2530 	down_write(&sit_i->sentry_lock);
2531 
2532 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
2533 
2534 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
2535 
2536 	/*
2537 	 * __add_sum_entry must be called with curseg_mutex held,
2538 	 * because it updates a summary entry in the current
2539 	 * summary block.
2540 	 */
2541 	__add_sum_entry(sbi, type, sum);
2542 
2543 	__refresh_next_blkoff(sbi, curseg);
2544 
2545 	stat_inc_block_count(sbi, curseg);
2546 
2547 	/*
2548 	 * SIT information should be updated before segment allocation,
2549 	 * since SSR needs latest valid block information.
2550 	 */
2551 	update_sit_entry(sbi, *new_blkaddr, 1);
2552 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
2553 		update_sit_entry(sbi, old_blkaddr, -1);
2554 
2555 	if (!__has_curseg_space(sbi, type))
2556 		sit_i->s_ops->allocate_segment(sbi, type, false);
2557 
2558 	/*
2559 	 * the segment dirty status should be updated after segment allocation,
2560 	 * so the status only needs to be updated once, after the previous
2561 	 * segment has been closed.
2562 	 */
2563 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
2564 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
2565 
2566 	up_write(&sit_i->sentry_lock);
2567 
2568 	if (page && IS_NODESEG(type)) {
2569 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
2570 
2571 		f2fs_inode_chksum_set(sbi, page);
2572 	}
2573 
2574 	if (add_list) {
2575 		struct f2fs_bio_info *io;
2576 
2577 		INIT_LIST_HEAD(&fio->list);
2578 		fio->in_list = true;
2579 		io = sbi->write_io[fio->type] + fio->temp;
2580 		spin_lock(&io->io_lock);
2581 		list_add_tail(&fio->list, &io->io_list);
2582 		spin_unlock(&io->io_lock);
2583 	}
2584 
2585 	mutex_unlock(&curseg->curseg_mutex);
2586 
2587 	up_read(&SM_I(sbi)->curseg_lock);
2588 }
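/*
 * Editor's note (not part of the original file): the lock order used
 * above is
 *
 *	SM_I(sbi)->curseg_lock (read)
 *	  -> curseg->curseg_mutex
 *	    -> sit_i->sentry_lock (write)
 *
 * and __f2fs_replace_block() below takes the same three locks in the
 * same order (curseg_lock as a writer), so the two paths nest
 * consistently and cannot deadlock against each other.
 */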
2589 
2590 static void update_device_state(struct f2fs_io_info *fio)
2591 {
2592 	struct f2fs_sb_info *sbi = fio->sbi;
2593 	unsigned int devidx;
2594 
2595 	if (!sbi->s_ndevs)
2596 		return;
2597 
2598 	devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
2599 
2600 	/* update device state for fsync */
2601 	set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
2602 
2603 	/* update device state for checkpoint */
2604 	if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
2605 		spin_lock(&sbi->dev_lock);
2606 		f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
2607 		spin_unlock(&sbi->dev_lock);
2608 	}
2609 }
2610 
2611 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
2612 {
2613 	int type = __get_segment_type(fio);
2614 	int err;
2615 
2616 reallocate:
2617 	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
2618 			&fio->new_blkaddr, sum, type, fio, true);
2619 
2620 	/* writeout dirty page into bdev */
2621 	err = f2fs_submit_page_write(fio);
2622 	if (err == -EAGAIN) {
2623 		fio->old_blkaddr = fio->new_blkaddr;
2624 		goto reallocate;
2625 	} else if (!err) {
2626 		update_device_state(fio);
2627 	}
2628 }
2629 
2630 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
2631 					enum iostat_type io_type)
2632 {
2633 	struct f2fs_io_info fio = {
2634 		.sbi = sbi,
2635 		.type = META,
2636 		.op = REQ_OP_WRITE,
2637 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
2638 		.old_blkaddr = page->index,
2639 		.new_blkaddr = page->index,
2640 		.page = page,
2641 		.encrypted_page = NULL,
2642 		.in_list = false,
2643 	};
2644 
2645 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
2646 		fio.op_flags &= ~REQ_META;
2647 
2648 	set_page_writeback(page);
2649 	f2fs_submit_page_write(&fio);
2650 
2651 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
2652 }
2653 
2654 void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
2655 {
2656 	struct f2fs_summary sum;
2657 
2658 	set_summary(&sum, nid, 0, 0);
2659 	do_write_page(&sum, fio);
2660 
2661 	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
2662 }
2663 
2664 void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
2665 {
2666 	struct f2fs_sb_info *sbi = fio->sbi;
2667 	struct f2fs_summary sum;
2668 	struct node_info ni;
2669 
2670 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
2671 	get_node_info(sbi, dn->nid, &ni);
2672 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
2673 	do_write_page(&sum, fio);
2674 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
2675 
2676 	f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
2677 }
2678 
2679 int rewrite_data_page(struct f2fs_io_info *fio)
2680 {
2681 	int err;
2682 
2683 	fio->new_blkaddr = fio->old_blkaddr;
2684 	stat_inc_inplace_blocks(fio->sbi);
2685 
2686 	err = f2fs_submit_page_bio(fio);
2687 	if (!err)
2688 		update_device_state(fio);
2689 
2690 	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
2691 
2692 	return err;
2693 }
2694 
2695 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
2696 						unsigned int segno)
2697 {
2698 	int i;
2699 
2700 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
2701 		if (CURSEG_I(sbi, i)->segno == segno)
2702 			break;
2703 	}
2704 	return i;
2705 }
2706 
2707 void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
2708 				block_t old_blkaddr, block_t new_blkaddr,
2709 				bool recover_curseg, bool recover_newaddr)
2710 {
2711 	struct sit_info *sit_i = SIT_I(sbi);
2712 	struct curseg_info *curseg;
2713 	unsigned int segno, old_cursegno;
2714 	struct seg_entry *se;
2715 	int type;
2716 	unsigned short old_blkoff;
2717 
2718 	segno = GET_SEGNO(sbi, new_blkaddr);
2719 	se = get_seg_entry(sbi, segno);
2720 	type = se->type;
2721 
2722 	down_write(&SM_I(sbi)->curseg_lock);
2723 
2724 	if (!recover_curseg) {
2725 		/* for recovery flow */
2726 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
2727 			if (old_blkaddr == NULL_ADDR)
2728 				type = CURSEG_COLD_DATA;
2729 			else
2730 				type = CURSEG_WARM_DATA;
2731 		}
2732 	} else {
2733 		if (IS_CURSEG(sbi, segno)) {
2734 			/* se->type is volatile because of SSR allocation */
2735 			type = __f2fs_get_curseg(sbi, segno);
2736 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
2737 		} else {
2738 			type = CURSEG_WARM_DATA;
2739 		}
2740 	}
2741 
2742 	curseg = CURSEG_I(sbi, type);
2743 
2744 	mutex_lock(&curseg->curseg_mutex);
2745 	down_write(&sit_i->sentry_lock);
2746 
2747 	old_cursegno = curseg->segno;
2748 	old_blkoff = curseg->next_blkoff;
2749 
2750 	/* change the current segment */
2751 	if (segno != curseg->segno) {
2752 		curseg->next_segno = segno;
2753 		change_curseg(sbi, type);
2754 	}
2755 
2756 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
2757 	__add_sum_entry(sbi, type, sum);
2758 
2759 	if (!recover_curseg || recover_newaddr)
2760 		update_sit_entry(sbi, new_blkaddr, 1);
2761 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
2762 		update_sit_entry(sbi, old_blkaddr, -1);
2763 
2764 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
2765 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
2766 
2767 	locate_dirty_segment(sbi, old_cursegno);
2768 
2769 	if (recover_curseg) {
2770 		if (old_cursegno != curseg->segno) {
2771 			curseg->next_segno = old_cursegno;
2772 			change_curseg(sbi, type);
2773 		}
2774 		curseg->next_blkoff = old_blkoff;
2775 	}
2776 
2777 	up_write(&sit_i->sentry_lock);
2778 	mutex_unlock(&curseg->curseg_mutex);
2779 	up_write(&SM_I(sbi)->curseg_lock);
2780 }
2781 
2782 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
2783 				block_t old_addr, block_t new_addr,
2784 				unsigned char version, bool recover_curseg,
2785 				bool recover_newaddr)
2786 {
2787 	struct f2fs_summary sum;
2788 
2789 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
2790 
2791 	__f2fs_replace_block(sbi, &sum, old_addr, new_addr,
2792 					recover_curseg, recover_newaddr);
2793 
2794 	f2fs_update_data_blkaddr(dn, new_addr);
2795 }
2796 
2797 void f2fs_wait_on_page_writeback(struct page *page,
2798 				enum page_type type, bool ordered)
2799 {
2800 	if (PageWriteback(page)) {
2801 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
2802 
2803 		f2fs_submit_merged_write_cond(sbi, page->mapping->host,
2804 						0, page->index, type);
2805 		if (ordered)
2806 			wait_on_page_writeback(page);
2807 		else
2808 			wait_for_stable_page(page);
2809 	}
2810 }
2811 
2812 void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
2813 {
2814 	struct page *cpage;
2815 
2816 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
2817 		return;
2818 
2819 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
2820 	if (cpage) {
2821 		f2fs_wait_on_page_writeback(cpage, DATA, true);
2822 		f2fs_put_page(cpage, 1);
2823 	}
2824 }
2825 
2826 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
2827 {
2828 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2829 	struct curseg_info *seg_i;
2830 	unsigned char *kaddr;
2831 	struct page *page;
2832 	block_t start;
2833 	int i, j, offset;
2834 
2835 	start = start_sum_block(sbi);
2836 
2837 	page = get_meta_page(sbi, start++);
2838 	kaddr = (unsigned char *)page_address(page);
2839 
2840 	/* Step 1: restore nat cache */
2841 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
2842 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
2843 
2844 	/* Step 2: restore sit cache */
2845 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
2846 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
2847 	offset = 2 * SUM_JOURNAL_SIZE;
2848 
2849 	/* Step 3: restore summary entries */
2850 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2851 		unsigned short blk_off;
2852 		unsigned int segno;
2853 
2854 		seg_i = CURSEG_I(sbi, i);
2855 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
2856 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
2857 		seg_i->next_segno = segno;
2858 		reset_curseg(sbi, i, 0);
2859 		seg_i->alloc_type = ckpt->alloc_type[i];
2860 		seg_i->next_blkoff = blk_off;
2861 
2862 		if (seg_i->alloc_type == SSR)
2863 			blk_off = sbi->blocks_per_seg;
2864 
2865 		for (j = 0; j < blk_off; j++) {
2866 			struct f2fs_summary *s;
2867 			s = (struct f2fs_summary *)(kaddr + offset);
2868 			seg_i->sum_blk->entries[j] = *s;
2869 			offset += SUMMARY_SIZE;
2870 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
2871 						SUM_FOOTER_SIZE)
2872 				continue;
2873 
2874 			f2fs_put_page(page, 1);
2875 			page = NULL;
2876 
2877 			page = get_meta_page(sbi, start++);
2878 			kaddr = (unsigned char *)page_address(page);
2879 			offset = 0;
2880 		}
2881 	}
2882 	f2fs_put_page(page, 1);
2883 	return 0;
2884 }
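/*
 * Editor's sketch (not part of the original file) of the compacted
 * summary layout read above and produced by write_compacted_summaries()
 * below, assuming 4KB pages:
 *
 *	+------------------------+ offset 0
 *	| NAT journal            | SUM_JOURNAL_SIZE bytes
 *	+------------------------+
 *	| SIT journal            | SUM_JOURNAL_SIZE bytes
 *	+------------------------+
 *	| summary entries ...    | SUMMARY_SIZE bytes each, for the
 *	+------------------------+ hot/warm/cold data logs in order,
 *	| (unused footer area)   | spilling into following pages; the
 *	+------------------------+ last SUM_FOOTER_SIZE bytes stay free
 */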
2885 
2886 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
2887 {
2888 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2889 	struct f2fs_summary_block *sum;
2890 	struct curseg_info *curseg;
2891 	struct page *new;
2892 	unsigned short blk_off;
2893 	unsigned int segno = 0;
2894 	block_t blk_addr = 0;
2895 
2896 	/* get segment number and block addr */
2897 	if (IS_DATASEG(type)) {
2898 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
2899 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
2900 							CURSEG_HOT_DATA]);
2901 		if (__exist_node_summaries(sbi))
2902 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
2903 		else
2904 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
2905 	} else {
2906 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
2907 							CURSEG_HOT_NODE]);
2908 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
2909 							CURSEG_HOT_NODE]);
2910 		if (__exist_node_summaries(sbi))
2911 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
2912 							type - CURSEG_HOT_NODE);
2913 		else
2914 			blk_addr = GET_SUM_BLOCK(sbi, segno);
2915 	}
2916 
2917 	new = get_meta_page(sbi, blk_addr);
2918 	sum = (struct f2fs_summary_block *)page_address(new);
2919 
2920 	if (IS_NODESEG(type)) {
2921 		if (__exist_node_summaries(sbi)) {
2922 			struct f2fs_summary *ns = &sum->entries[0];
2923 			int i;
2924 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
2925 				ns->version = 0;
2926 				ns->ofs_in_node = 0;
2927 			}
2928 		} else {
2929 			int err;
2930 
2931 			err = restore_node_summary(sbi, segno, sum);
2932 			if (err) {
2933 				f2fs_put_page(new, 1);
2934 				return err;
2935 			}
2936 		}
2937 	}
2938 
2939 	/* set the incomplete segment as curseg */
2940 	curseg = CURSEG_I(sbi, type);
2941 	mutex_lock(&curseg->curseg_mutex);
2942 
2943 	/* update journal info */
2944 	down_write(&curseg->journal_rwsem);
2945 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
2946 	up_write(&curseg->journal_rwsem);
2947 
2948 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
2949 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
2950 	curseg->next_segno = segno;
2951 	reset_curseg(sbi, type, 0);
2952 	curseg->alloc_type = ckpt->alloc_type[type];
2953 	curseg->next_blkoff = blk_off;
2954 	mutex_unlock(&curseg->curseg_mutex);
2955 	f2fs_put_page(new, 1);
2956 	return 0;
2957 }
2958 
2959 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
2960 {
2961 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
2962 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
2963 	int type = CURSEG_HOT_DATA;
2964 	int err;
2965 
2966 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
2967 		int npages = npages_for_summary_flush(sbi, true);
2968 
2969 		if (npages >= 2)
2970 			ra_meta_pages(sbi, start_sum_block(sbi), npages,
2971 							META_CP, true);
2972 
2973 		/* restore for compacted data summary */
2974 		if (read_compacted_summaries(sbi))
2975 			return -EINVAL;
2976 		type = CURSEG_HOT_NODE;
2977 	}
2978 
2979 	if (__exist_node_summaries(sbi))
2980 		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
2981 					NR_CURSEG_TYPE - type, META_CP, true);
2982 
2983 	for (; type <= CURSEG_COLD_NODE; type++) {
2984 		err = read_normal_summaries(sbi, type);
2985 		if (err)
2986 			return err;
2987 	}
2988 
2989 	/* sanity check for summary blocks */
2990 	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
2991 			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
2992 		return -EINVAL;
2993 
2994 	return 0;
2995 }
2996 
2997 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
2998 {
2999 	struct page *page;
3000 	unsigned char *kaddr;
3001 	struct f2fs_summary *summary;
3002 	struct curseg_info *seg_i;
3003 	int written_size = 0;
3004 	int i, j;
3005 
3006 	page = grab_meta_page(sbi, blkaddr++);
3007 	kaddr = (unsigned char *)page_address(page);
3008 
3009 	/* Step 1: write nat cache */
3010 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3011 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
3012 	written_size += SUM_JOURNAL_SIZE;
3013 
3014 	/* Step 2: write sit cache */
3015 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3016 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
3017 	written_size += SUM_JOURNAL_SIZE;
3018 
3019 	/* Step 3: write summary entries */
3020 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3021 		unsigned short blkoff;
3022 		seg_i = CURSEG_I(sbi, i);
3023 		if (sbi->ckpt->alloc_type[i] == SSR)
3024 			blkoff = sbi->blocks_per_seg;
3025 		else
3026 			blkoff = curseg_blkoff(sbi, i);
3027 
3028 		for (j = 0; j < blkoff; j++) {
3029 			if (!page) {
3030 				page = grab_meta_page(sbi, blkaddr++);
3031 				kaddr = (unsigned char *)page_address(page);
3032 				written_size = 0;
3033 			}
3034 			summary = (struct f2fs_summary *)(kaddr + written_size);
3035 			*summary = seg_i->sum_blk->entries[j];
3036 			written_size += SUMMARY_SIZE;
3037 
3038 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
3039 							SUM_FOOTER_SIZE)
3040 				continue;
3041 
3042 			set_page_dirty(page);
3043 			f2fs_put_page(page, 1);
3044 			page = NULL;
3045 		}
3046 	}
3047 	if (page) {
3048 		set_page_dirty(page);
3049 		f2fs_put_page(page, 1);
3050 	}
3051 }
3052 
3053 static void write_normal_summaries(struct f2fs_sb_info *sbi,
3054 					block_t blkaddr, int type)
3055 {
3056 	int i, end;
3057 	if (IS_DATASEG(type))
3058 		end = type + NR_CURSEG_DATA_TYPE;
3059 	else
3060 		end = type + NR_CURSEG_NODE_TYPE;
3061 
3062 	for (i = type; i < end; i++)
3063 		write_current_sum_page(sbi, i, blkaddr + (i - type));
3064 }
3065 
3066 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3067 {
3068 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
3069 		write_compacted_summaries(sbi, start_blk);
3070 	else
3071 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
3072 }
3073 
3074 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3075 {
3076 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
3077 }
3078 
3079 int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3080 					unsigned int val, int alloc)
3081 {
3082 	int i;
3083 
3084 	if (type == NAT_JOURNAL) {
3085 		for (i = 0; i < nats_in_cursum(journal); i++) {
3086 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
3087 				return i;
3088 		}
3089 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
3090 			return update_nats_in_cursum(journal, 1);
3091 	} else if (type == SIT_JOURNAL) {
3092 		for (i = 0; i < sits_in_cursum(journal); i++)
3093 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
3094 				return i;
3095 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
3096 			return update_sits_in_cursum(journal, 1);
3097 	}
3098 	return -1;
3099 }
3100 
3101 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
3102 					unsigned int segno)
3103 {
3104 	return get_meta_page(sbi, current_sit_addr(sbi, segno));
3105 }
3106 
3107 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
3108 					unsigned int start)
3109 {
3110 	struct sit_info *sit_i = SIT_I(sbi);
3111 	struct page *src_page, *dst_page;
3112 	pgoff_t src_off, dst_off;
3113 	void *src_addr, *dst_addr;
3114 
3115 	src_off = current_sit_addr(sbi, start);
3116 	dst_off = next_sit_addr(sbi, src_off);
3117 
3118 	/* get current sit block page without lock */
3119 	src_page = get_meta_page(sbi, src_off);
3120 	dst_page = grab_meta_page(sbi, dst_off);
3121 	f2fs_bug_on(sbi, PageDirty(src_page));
3122 
3123 	src_addr = page_address(src_page);
3124 	dst_addr = page_address(dst_page);
3125 	memcpy(dst_addr, src_addr, PAGE_SIZE);
3126 
3127 	set_page_dirty(dst_page);
3128 	f2fs_put_page(src_page, 1);
3129 
3130 	set_to_next_sit(sit_i, start);
3131 
3132 	return dst_page;
3133 }
3134 
3135 static struct sit_entry_set *grab_sit_entry_set(void)
3136 {
3137 	struct sit_entry_set *ses =
3138 			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
3139 
3140 	ses->entry_cnt = 0;
3141 	INIT_LIST_HEAD(&ses->set_list);
3142 	return ses;
3143 }
3144 
3145 static void release_sit_entry_set(struct sit_entry_set *ses)
3146 {
3147 	list_del(&ses->set_list);
3148 	kmem_cache_free(sit_entry_set_slab, ses);
3149 }
3150 
3151 static void adjust_sit_entry_set(struct sit_entry_set *ses,
3152 						struct list_head *head)
3153 {
3154 	struct sit_entry_set *next = ses;
3155 
3156 	if (list_is_last(&ses->set_list, head))
3157 		return;
3158 
3159 	list_for_each_entry_continue(next, head, set_list)
3160 		if (ses->entry_cnt <= next->entry_cnt)
3161 			break;
3162 
3163 	list_move_tail(&ses->set_list, &next->set_list);
3164 }
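/*
 * Editor's note (not part of the original file): the set list is kept
 * sorted by entry_cnt in ascending order; e.g. counts [1, 3, 5] become
 * [1, 4, 5] when the middle set gains an entry, with no move needed.
 * flush_sit_entries() then drains the small sets into the journal
 * first, so as many sets as possible avoid a full SIT page write.
 */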
3165 
3166 static void add_sit_entry(unsigned int segno, struct list_head *head)
3167 {
3168 	struct sit_entry_set *ses;
3169 	unsigned int start_segno = START_SEGNO(segno);
3170 
3171 	list_for_each_entry(ses, head, set_list) {
3172 		if (ses->start_segno == start_segno) {
3173 			ses->entry_cnt++;
3174 			adjust_sit_entry_set(ses, head);
3175 			return;
3176 		}
3177 	}
3178 
3179 	ses = grab_sit_entry_set();
3180 
3181 	ses->start_segno = start_segno;
3182 	ses->entry_cnt++;
3183 	list_add(&ses->set_list, head);
3184 }
3185 
3186 static void add_sits_in_set(struct f2fs_sb_info *sbi)
3187 {
3188 	struct f2fs_sm_info *sm_info = SM_I(sbi);
3189 	struct list_head *set_list = &sm_info->sit_entry_set;
3190 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
3191 	unsigned int segno;
3192 
3193 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
3194 		add_sit_entry(segno, set_list);
3195 }
3196 
3197 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
3198 {
3199 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3200 	struct f2fs_journal *journal = curseg->journal;
3201 	int i;
3202 
3203 	down_write(&curseg->journal_rwsem);
3204 	for (i = 0; i < sits_in_cursum(journal); i++) {
3205 		unsigned int segno;
3206 		bool dirtied;
3207 
3208 		segno = le32_to_cpu(segno_in_journal(journal, i));
3209 		dirtied = __mark_sit_entry_dirty(sbi, segno);
3210 
3211 		if (!dirtied)
3212 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
3213 	}
3214 	update_sits_in_cursum(journal, -i);
3215 	up_write(&curseg->journal_rwsem);
3216 }
3217 
3218 /*
3219  * CP calls this function, which flushes SIT entries including sit_journal,
3220  * and moves prefree segs to free segs.
3221  */
3222 void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3223 {
3224 	struct sit_info *sit_i = SIT_I(sbi);
3225 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
3226 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3227 	struct f2fs_journal *journal = curseg->journal;
3228 	struct sit_entry_set *ses, *tmp;
3229 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
3230 	bool to_journal = true;
3231 	struct seg_entry *se;
3232 
3233 	down_write(&sit_i->sentry_lock);
3234 
3235 	if (!sit_i->dirty_sentries)
3236 		goto out;
3237 
3238 	/*
3239 	 * temporarily add and account the sit entries from the dirty
3240 	 * bitmap in the sit entry sets
3241 	 */
3242 	add_sits_in_set(sbi);
3243 
3244 	/*
3245 	 * if there is not enough space in the journal to store the dirty
3246 	 * sit entries, remove all entries from the journal and account for
3247 	 * them in the sit entry sets.
3248 	 */
3249 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
3250 		remove_sits_in_journal(sbi);
3251 
3252 	/*
3253 	 * there are two steps to flush sit entries:
3254 	 * #1, flush sit entries to journal in current cold data summary block.
3255 	 * #2, flush sit entries to sit page.
3256 	 */
3257 	list_for_each_entry_safe(ses, tmp, head, set_list) {
3258 		struct page *page = NULL;
3259 		struct f2fs_sit_block *raw_sit = NULL;
3260 		unsigned int start_segno = ses->start_segno;
3261 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
3262 						(unsigned long)MAIN_SEGS(sbi));
3263 		unsigned int segno = start_segno;
3264 
3265 		if (to_journal &&
3266 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
3267 			to_journal = false;
3268 
3269 		if (to_journal) {
3270 			down_write(&curseg->journal_rwsem);
3271 		} else {
3272 			page = get_next_sit_page(sbi, start_segno);
3273 			raw_sit = page_address(page);
3274 		}
3275 
3276 		/* flush dirty sit entries in region of current sit set */
3277 		for_each_set_bit_from(segno, bitmap, end) {
3278 			int offset, sit_offset;
3279 
3280 			se = get_seg_entry(sbi, segno);
3281 
3282 			/* add discard candidates */
3283 			if (!(cpc->reason & CP_DISCARD)) {
3284 				cpc->trim_start = segno;
3285 				add_discard_addrs(sbi, cpc, false);
3286 			}
3287 
3288 			if (to_journal) {
3289 				offset = lookup_journal_in_cursum(journal,
3290 							SIT_JOURNAL, segno, 1);
3291 				f2fs_bug_on(sbi, offset < 0);
3292 				segno_in_journal(journal, offset) =
3293 							cpu_to_le32(segno);
3294 				seg_info_to_raw_sit(se,
3295 					&sit_in_journal(journal, offset));
3296 			} else {
3297 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
3298 				seg_info_to_raw_sit(se,
3299 						&raw_sit->entries[sit_offset]);
3300 			}
3301 
3302 			__clear_bit(segno, bitmap);
3303 			sit_i->dirty_sentries--;
3304 			ses->entry_cnt--;
3305 		}
3306 
3307 		if (to_journal)
3308 			up_write(&curseg->journal_rwsem);
3309 		else
3310 			f2fs_put_page(page, 1);
3311 
3312 		f2fs_bug_on(sbi, ses->entry_cnt);
3313 		release_sit_entry_set(ses);
3314 	}
3315 
3316 	f2fs_bug_on(sbi, !list_empty(head));
3317 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
3318 out:
3319 	if (cpc->reason & CP_DISCARD) {
3320 		__u64 trim_start = cpc->trim_start;
3321 
3322 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
3323 			add_discard_addrs(sbi, cpc, false);
3324 
3325 		cpc->trim_start = trim_start;
3326 	}
3327 	up_write(&sit_i->sentry_lock);
3328 
3329 	set_prefree_as_free_segments(sbi);
3330 }
3331 
3332 static int build_sit_info(struct f2fs_sb_info *sbi)
3333 {
3334 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3335 	struct sit_info *sit_i;
3336 	unsigned int sit_segs, start;
3337 	char *src_bitmap;
3338 	unsigned int bitmap_size;
3339 
3340 	/* allocate memory for SIT information */
3341 	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
3342 	if (!sit_i)
3343 		return -ENOMEM;
3344 
3345 	SM_I(sbi)->sit_info = sit_i;
3346 
3347 	sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) *
3348 					sizeof(struct seg_entry), GFP_KERNEL);
3349 	if (!sit_i->sentries)
3350 		return -ENOMEM;
3351 
3352 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3353 	sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
3354 	if (!sit_i->dirty_sentries_bitmap)
3355 		return -ENOMEM;
3356 
3357 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
3358 		sit_i->sentries[start].cur_valid_map
3359 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3360 		sit_i->sentries[start].ckpt_valid_map
3361 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3362 		if (!sit_i->sentries[start].cur_valid_map ||
3363 				!sit_i->sentries[start].ckpt_valid_map)
3364 			return -ENOMEM;
3365 
3366 #ifdef CONFIG_F2FS_CHECK_FS
3367 		sit_i->sentries[start].cur_valid_map_mir
3368 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3369 		if (!sit_i->sentries[start].cur_valid_map_mir)
3370 			return -ENOMEM;
3371 #endif
3372 
3373 		if (f2fs_discard_en(sbi)) {
3374 			sit_i->sentries[start].discard_map
3375 				= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3376 			if (!sit_i->sentries[start].discard_map)
3377 				return -ENOMEM;
3378 		}
3379 	}
3380 
3381 	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3382 	if (!sit_i->tmp_map)
3383 		return -ENOMEM;
3384 
3385 	if (sbi->segs_per_sec > 1) {
3386 		sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) *
3387 					sizeof(struct sec_entry), GFP_KERNEL);
3388 		if (!sit_i->sec_entries)
3389 			return -ENOMEM;
3390 	}
3391 
3392 	/* get information related to SIT */
3393 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
3394 
3395 	/* set up the SIT bitmap from the checkpoint pack */
3396 	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
3397 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
3398 
3399 	sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3400 	if (!sit_i->sit_bitmap)
3401 		return -ENOMEM;
3402 
3403 #ifdef CONFIG_F2FS_CHECK_FS
3404 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3405 	if (!sit_i->sit_bitmap_mir)
3406 		return -ENOMEM;
3407 #endif
3408 
3409 	/* init SIT information */
3410 	sit_i->s_ops = &default_salloc_ops;
3411 
3412 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
3413 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
3414 	sit_i->written_valid_blocks = 0;
3415 	sit_i->bitmap_size = bitmap_size;
3416 	sit_i->dirty_sentries = 0;
3417 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
3418 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
3419 	sit_i->mounted_time = ktime_get_real_seconds();
3420 	init_rwsem(&sit_i->sentry_lock);
3421 	return 0;
3422 }
3423 
3424 static int build_free_segmap(struct f2fs_sb_info *sbi)
3425 {
3426 	struct free_segmap_info *free_i;
3427 	unsigned int bitmap_size, sec_bitmap_size;
3428 
3429 	/* allocate memory for free segmap information */
3430 	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
3431 	if (!free_i)
3432 		return -ENOMEM;
3433 
3434 	SM_I(sbi)->free_info = free_i;
3435 
3436 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3437 	free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL);
3438 	if (!free_i->free_segmap)
3439 		return -ENOMEM;
3440 
3441 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
3442 	free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL);
3443 	if (!free_i->free_secmap)
3444 		return -ENOMEM;
3445 
3446 	/* set all segments as dirty temporarily */
3447 	memset(free_i->free_segmap, 0xff, bitmap_size);
3448 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
3449 
3450 	/* init free segmap information */
3451 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
3452 	free_i->free_segments = 0;
3453 	free_i->free_sections = 0;
3454 	spin_lock_init(&free_i->segmap_lock);
3455 	return 0;
3456 }
3457 
3458 static int build_curseg(struct f2fs_sb_info *sbi)
3459 {
3460 	struct curseg_info *array;
3461 	int i;
3462 
3463 	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
3464 	if (!array)
3465 		return -ENOMEM;
3466 
3467 	SM_I(sbi)->curseg_array = array;
3468 
3469 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
3470 		mutex_init(&array[i].curseg_mutex);
3471 		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
3472 		if (!array[i].sum_blk)
3473 			return -ENOMEM;
3474 		init_rwsem(&array[i].journal_rwsem);
3475 		array[i].journal = kzalloc(sizeof(struct f2fs_journal),
3476 							GFP_KERNEL);
3477 		if (!array[i].journal)
3478 			return -ENOMEM;
3479 		array[i].segno = NULL_SEGNO;
3480 		array[i].next_blkoff = 0;
3481 	}
3482 	return restore_curseg_summaries(sbi);
3483 }
3484 
3485 static void build_sit_entries(struct f2fs_sb_info *sbi)
3486 {
3487 	struct sit_info *sit_i = SIT_I(sbi);
3488 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3489 	struct f2fs_journal *journal = curseg->journal;
3490 	struct seg_entry *se;
3491 	struct f2fs_sit_entry sit;
3492 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
3493 	unsigned int i, start, end;
3494 	unsigned int readed, start_blk = 0;
3495 
3496 	do {
3497 		readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
3498 							META_SIT, true);
3499 
3500 		start = start_blk * sit_i->sents_per_block;
3501 		end = (start_blk + readed) * sit_i->sents_per_block;
3502 
3503 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
3504 			struct f2fs_sit_block *sit_blk;
3505 			struct page *page;
3506 
3507 			se = &sit_i->sentries[start];
3508 			page = get_current_sit_page(sbi, start);
3509 			sit_blk = (struct f2fs_sit_block *)page_address(page);
3510 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
3511 			f2fs_put_page(page, 1);
3512 
3513 			check_block_count(sbi, start, &sit);
3514 			seg_info_from_raw_sit(se, &sit);
3515 
3516 			/* build discard map only one time */
3517 			if (f2fs_discard_en(sbi)) {
3518 				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3519 					memset(se->discard_map, 0xff,
3520 						SIT_VBLOCK_MAP_SIZE);
3521 				} else {
3522 					memcpy(se->discard_map,
3523 						se->cur_valid_map,
3524 						SIT_VBLOCK_MAP_SIZE);
3525 					sbi->discard_blks +=
3526 						sbi->blocks_per_seg -
3527 						se->valid_blocks;
3528 				}
3529 			}
3530 
3531 			if (sbi->segs_per_sec > 1)
3532 				get_sec_entry(sbi, start)->valid_blocks +=
3533 							se->valid_blocks;
3534 		}
3535 		start_blk += readed;
3536 	} while (start_blk < sit_blk_cnt);
3537 
3538 	down_read(&curseg->journal_rwsem);
3539 	for (i = 0; i < sits_in_cursum(journal); i++) {
3540 		unsigned int old_valid_blocks;
3541 
3542 		start = le32_to_cpu(segno_in_journal(journal, i));
3543 		se = &sit_i->sentries[start];
3544 		sit = sit_in_journal(journal, i);
3545 
3546 		old_valid_blocks = se->valid_blocks;
3547 
3548 		check_block_count(sbi, start, &sit);
3549 		seg_info_from_raw_sit(se, &sit);
3550 
3551 		if (f2fs_discard_en(sbi)) {
3552 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3553 				memset(se->discard_map, 0xff,
3554 							SIT_VBLOCK_MAP_SIZE);
3555 			} else {
3556 				memcpy(se->discard_map, se->cur_valid_map,
3557 							SIT_VBLOCK_MAP_SIZE);
3558 				sbi->discard_blks += old_valid_blocks -
3559 							se->valid_blocks;
3560 			}
3561 		}
3562 
3563 		if (sbi->segs_per_sec > 1)
3564 			get_sec_entry(sbi, start)->valid_blocks +=
3565 				se->valid_blocks - old_valid_blocks;
3566 	}
3567 	up_read(&curseg->journal_rwsem);
3568 }
3569 
3570 static void init_free_segmap(struct f2fs_sb_info *sbi)
3571 {
3572 	unsigned int start;
3573 	int type;
3574 
3575 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
3576 		struct seg_entry *sentry = get_seg_entry(sbi, start);
3577 		if (!sentry->valid_blocks)
3578 			__set_free(sbi, start);
3579 		else
3580 			SIT_I(sbi)->written_valid_blocks +=
3581 						sentry->valid_blocks;
3582 	}
3583 
3584 	/* mark the current segments as in-use */
3585 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
3586 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
3587 		__set_test_and_inuse(sbi, curseg_t->segno);
3588 	}
3589 }
3590 
3591 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
3592 {
3593 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3594 	struct free_segmap_info *free_i = FREE_I(sbi);
3595 	unsigned int segno = 0, offset = 0;
3596 	unsigned short valid_blocks;
3597 
3598 	while (1) {
3599 		/* find dirty segment based on free segmap */
3600 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
3601 		if (segno >= MAIN_SEGS(sbi))
3602 			break;
3603 		offset = segno + 1;
3604 		valid_blocks = get_valid_blocks(sbi, segno, false);
3605 		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
3606 			continue;
3607 		if (valid_blocks > sbi->blocks_per_seg) {
3608 			f2fs_bug_on(sbi, 1);
3609 			continue;
3610 		}
3611 		mutex_lock(&dirty_i->seglist_lock);
3612 		__locate_dirty_segment(sbi, segno, DIRTY);
3613 		mutex_unlock(&dirty_i->seglist_lock);
3614 	}
3615 }
3616 
3617 static int init_victim_secmap(struct f2fs_sb_info *sbi)
3618 {
3619 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3620 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
3621 
3622 	dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL);
3623 	if (!dirty_i->victim_secmap)
3624 		return -ENOMEM;
3625 	return 0;
3626 }
3627 
3628 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
3629 {
3630 	struct dirty_seglist_info *dirty_i;
3631 	unsigned int bitmap_size, i;
3632 
3633 	/* allocate memory for dirty segments list information */
3634 	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
3635 	if (!dirty_i)
3636 		return -ENOMEM;
3637 
3638 	SM_I(sbi)->dirty_info = dirty_i;
3639 	mutex_init(&dirty_i->seglist_lock);
3640 
3641 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3642 
3643 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
3644 		dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL);
3645 		if (!dirty_i->dirty_segmap[i])
3646 			return -ENOMEM;
3647 	}
3648 
3649 	init_dirty_segmap(sbi);
3650 	return init_victim_secmap(sbi);
3651 }
3652 
3653 /*
3654  * Update min, max modified time for cost-benefit GC algorithm
3655  */
3656 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
3657 {
3658 	struct sit_info *sit_i = SIT_I(sbi);
3659 	unsigned int segno;
3660 
3661 	down_write(&sit_i->sentry_lock);
3662 
3663 	sit_i->min_mtime = LLONG_MAX;
3664 
3665 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
3666 		unsigned int i;
3667 		unsigned long long mtime = 0;
3668 
3669 		for (i = 0; i < sbi->segs_per_sec; i++)
3670 			mtime += get_seg_entry(sbi, segno + i)->mtime;
3671 
3672 		mtime = div_u64(mtime, sbi->segs_per_sec);
3673 
3674 		if (sit_i->min_mtime > mtime)
3675 			sit_i->min_mtime = mtime;
3676 	}
3677 	sit_i->max_mtime = get_mtime(sbi);
3678 	up_write(&sit_i->sentry_lock);
3679 }
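/*
 * Editor's note (not part of the original file): per section the loop
 * above computes
 *
 *	mtime(sec) = (sum of segment mtimes in the section) / segs_per_sec
 *
 * min_mtime tracks the smallest such average and max_mtime is simply
 * the current mtime; the cost-benefit victim policy in gc.c uses the
 * pair to normalize a candidate's age.
 */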
3680 
3681 int build_segment_manager(struct f2fs_sb_info *sbi)
3682 {
3683 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3684 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3685 	struct f2fs_sm_info *sm_info;
3686 	int err;
3687 
3688 	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
3689 	if (!sm_info)
3690 		return -ENOMEM;
3691 
3692 	/* init sm info */
3693 	sbi->sm_info = sm_info;
3694 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3695 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3696 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
3697 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3698 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3699 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
3700 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
3701 	sm_info->rec_prefree_segments = sm_info->main_segments *
3702 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
3703 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
3704 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
3705 
3706 	if (!test_opt(sbi, LFS))
3707 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
3708 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
3709 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
3710 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
3711 	sm_info->min_ssr_sections = reserved_sections(sbi);
3712 
3713 	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
3714 
3715 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
3716 
3717 	init_rwsem(&sm_info->curseg_lock);
3718 
3719 	if (!f2fs_readonly(sbi->sb)) {
3720 		err = create_flush_cmd_control(sbi);
3721 		if (err)
3722 			return err;
3723 	}
3724 
3725 	err = create_discard_cmd_control(sbi);
3726 	if (err)
3727 		return err;
3728 
3729 	err = build_sit_info(sbi);
3730 	if (err)
3731 		return err;
3732 	err = build_free_segmap(sbi);
3733 	if (err)
3734 		return err;
3735 	err = build_curseg(sbi);
3736 	if (err)
3737 		return err;
3738 
3739 	/* reinit free segmap based on SIT */
3740 	build_sit_entries(sbi);
3741 
3742 	init_free_segmap(sbi);
3743 	err = build_dirty_segmap(sbi);
3744 	if (err)
3745 		return err;
3746 
3747 	init_min_max_mtime(sbi);
3748 	return 0;
3749 }
3750 
3751 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
3752 		enum dirty_type dirty_type)
3753 {
3754 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3755 
3756 	mutex_lock(&dirty_i->seglist_lock);
3757 	kvfree(dirty_i->dirty_segmap[dirty_type]);
3758 	dirty_i->nr_dirty[dirty_type] = 0;
3759 	mutex_unlock(&dirty_i->seglist_lock);
3760 }
3761 
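/* Free the bitmap of sections currently selected as GC victims. */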
3762 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
3763 {
3764 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3765 	kvfree(dirty_i->victim_secmap);
3766 }
3767 
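/*
 * Drop all dirty/prefree segment bitmaps and the victim section map,
 * then free the dirty_seglist_info itself.
 */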
3768 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
3769 {
3770 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3771 	int i;
3772 
3773 	if (!dirty_i)
3774 		return;
3775 
3776 	/* discard pre-free/dirty segments list */
3777 	for (i = 0; i < NR_DIRTY_TYPE; i++)
3778 		discard_dirty_segmap(sbi, i);
3779 
3780 	destroy_victim_secmap(sbi);
3781 	SM_I(sbi)->dirty_info = NULL;
3782 	kfree(dirty_i);
3783 }
3784 
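/* Free the cached summary block and journal of every log type. */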
3785 static void destroy_curseg(struct f2fs_sb_info *sbi)
3786 {
3787 	struct curseg_info *array = SM_I(sbi)->curseg_array;
3788 	int i;
3789 
3790 	if (!array)
3791 		return;
3792 	SM_I(sbi)->curseg_array = NULL;
3793 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
3794 		kfree(array[i].sum_blk);
3795 		kfree(array[i].journal);
3796 	}
3797 	kfree(array);
3798 }
3799 
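/* Free the free segment and free section bitmaps. */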
3800 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
3801 {
3802 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
3803 	if (!free_i)
3804 		return;
3805 	SM_I(sbi)->free_info = NULL;
3806 	kvfree(free_i->free_segmap);
3807 	kvfree(free_i->free_secmap);
3808 	kfree(free_i);
3809 }
3810 
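/*
 * Free the per-segment bitmaps (current/checkpoint valid maps and
 * discard map), the section entries, the dirty-sentry bitmap and the
 * in-memory copies of the SIT bitmap.
 */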
3811 static void destroy_sit_info(struct f2fs_sb_info *sbi)
3812 {
3813 	struct sit_info *sit_i = SIT_I(sbi);
3814 	unsigned int start;
3815 
3816 	if (!sit_i)
3817 		return;
3818 
3819 	if (sit_i->sentries) {
3820 		for (start = 0; start < MAIN_SEGS(sbi); start++) {
3821 			kfree(sit_i->sentries[start].cur_valid_map);
3822 #ifdef CONFIG_F2FS_CHECK_FS
3823 			kfree(sit_i->sentries[start].cur_valid_map_mir);
3824 #endif
3825 			kfree(sit_i->sentries[start].ckpt_valid_map);
3826 			kfree(sit_i->sentries[start].discard_map);
3827 		}
3828 	}
3829 	kfree(sit_i->tmp_map);
3830 
3831 	kvfree(sit_i->sentries);
3832 	kvfree(sit_i->sec_entries);
3833 	kvfree(sit_i->dirty_sentries_bitmap);
3834 
3835 	SM_I(sbi)->sit_info = NULL;
3836 	kfree(sit_i->sit_bitmap);
3837 #ifdef CONFIG_F2FS_CHECK_FS
3838 	kfree(sit_i->sit_bitmap_mir);
3839 #endif
3840 	kfree(sit_i);
3841 }
3842 
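/*
 * Tear-down counterpart of build_segment_manager(), called from
 * put_super and from the mount error path.
 */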
3843 void destroy_segment_manager(struct f2fs_sb_info *sbi)
3844 {
3845 	struct f2fs_sm_info *sm_info = SM_I(sbi);
3846 
3847 	if (!sm_info)
3848 		return;
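	/* stop flush/discard handling first, then free in reverse build order */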
3849 	destroy_flush_cmd_control(sbi, true);
3850 	destroy_discard_cmd_control(sbi);
3851 	destroy_dirty_segmap(sbi);
3852 	destroy_curseg(sbi);
3853 	destroy_free_segmap(sbi);
3854 	destroy_sit_info(sbi);
3855 	sbi->sm_info = NULL;
3856 	kfree(sm_info);
3857 }
3858 
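/*
 * Create the slab caches shared by every f2fs instance; called once
 * from module init, with each failure unwinding the caches created
 * before it.
 */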
3859 int __init create_segment_manager_caches(void)
3860 {
3861 	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
3862 			sizeof(struct discard_entry));
3863 	if (!discard_entry_slab)
3864 		goto fail;
3865 
3866 	discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
3867 			sizeof(struct discard_cmd));
3868 	if (!discard_cmd_slab)
3869 		goto destroy_discard_entry;
3870 
3871 	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
3872 			sizeof(struct sit_entry_set));
3873 	if (!sit_entry_set_slab)
3874 		goto destroy_discard_cmd;
3875 
3876 	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
3877 			sizeof(struct inmem_pages));
3878 	if (!inmem_entry_slab)
3879 		goto destroy_sit_entry_set;
3880 	return 0;
3881 
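/* unwind: release the caches created so far, newest first */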
3882 destroy_sit_entry_set:
3883 	kmem_cache_destroy(sit_entry_set_slab);
3884 destroy_discard_cmd:
3885 	kmem_cache_destroy(discard_cmd_slab);
3886 destroy_discard_entry:
3887 	kmem_cache_destroy(discard_entry_slab);
3888 fail:
3889 	return -ENOMEM;
3890 }
3891 
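/* Module-exit counterpart of create_segment_manager_caches(). */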
3892 void destroy_segment_manager_caches(void)
3893 {
3894 	kmem_cache_destroy(sit_entry_set_slab);
3895 	kmem_cache_destroy(discard_cmd_slab);
3896 	kmem_cache_destroy(discard_entry_slab);
3897 	kmem_cache_destroy(inmem_entry_slab);
3898 }
3899 }