xref: /linux/fs/f2fs/segment.c (revision 067012974c8ae31a8886046df082aeba93592972)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/segment.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/prefetch.h>
13 #include <linux/kthread.h>
14 #include <linux/swap.h>
15 #include <linux/timer.h>
16 #include <linux/freezer.h>
17 #include <linux/sched/signal.h>
18 
19 #include "f2fs.h"
20 #include "segment.h"
21 #include "node.h"
22 #include "gc.h"
23 #include "trace.h"
24 #include <trace/events/f2fs.h>
25 
26 #define __reverse_ffz(x) __reverse_ffs(~(x))
27 
28 static struct kmem_cache *discard_entry_slab;
29 static struct kmem_cache *discard_cmd_slab;
30 static struct kmem_cache *sit_entry_set_slab;
31 static struct kmem_cache *inmem_entry_slab;
32 
33 static unsigned long __reverse_ulong(unsigned char *str)
34 {
35 	unsigned long tmp = 0;
36 	int shift = 24, idx = 0;
37 
38 #if BITS_PER_LONG == 64
39 	shift = 56;
40 #endif
41 	while (shift >= 0) {
42 		tmp |= (unsigned long)str[idx++] << shift;
43 		shift -= BITS_PER_BYTE;
44 	}
45 	return tmp;
46 }
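/*
 * Worked example: on a 64-bit build, for str = {0xAB, 0xCD, 0x00, ...}
 * the loop places str[0] in the most significant byte, so
 * __reverse_ulong() returns 0xABCD000000000000UL. Bitmap byte 0 thus
 * lands in the top byte of the word, letting __reverse_ffs()/
 * __reverse_ffz() treat the leading-zero count as the bitmap offset.
 */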
47 
48 /*
49  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
50  * MSB and LSB are reversed in a byte by f2fs_set_bit.
51  */
52 static inline unsigned long __reverse_ffs(unsigned long word)
53 {
54 	int num = 0;
55 
56 #if BITS_PER_LONG == 64
57 	if ((word & 0xffffffff00000000UL) == 0)
58 		num += 32;
59 	else
60 		word >>= 32;
61 #endif
62 	if ((word & 0xffff0000) == 0)
63 		num += 16;
64 	else
65 		word >>= 16;
66 
67 	if ((word & 0xff00) == 0)
68 		num += 8;
69 	else
70 		word >>= 8;
71 
72 	if ((word & 0xf0) == 0)
73 		num += 4;
74 	else
75 		word >>= 4;
76 
77 	if ((word & 0xc) == 0)
78 		num += 2;
79 	else
80 		word >>= 2;
81 
82 	if ((word & 0x2) == 0)
83 		num += 1;
84 	return num;
85 }
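/*
 * Example: __reverse_ffs(0x0000000100000000UL) returns 31: the binary
 * search halves the word from the top, accumulating 16 + 8 + 4 + 2 + 1
 * after the first shift, i.e. the count of zero bits above the highest
 * set bit, which is that bit's offset in the reversed bitmap order.
 */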
86 
87 /*
88  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
89  * f2fs_set_bit makes MSB and LSB reversed in a byte.
90  * @size must be an integral multiple of the bit width of unsigned long.
91  * Example:
92  *                             MSB <--> LSB
93  *   f2fs_set_bit(0, bitmap) => 1000 0000
94  *   f2fs_set_bit(7, bitmap) => 0000 0001
95  */
96 static unsigned long __find_rev_next_bit(const unsigned long *addr,
97 			unsigned long size, unsigned long offset)
98 {
99 	const unsigned long *p = addr + BIT_WORD(offset);
100 	unsigned long result = size;
101 	unsigned long tmp;
102 
103 	if (offset >= size)
104 		return size;
105 
106 	size -= (offset & ~(BITS_PER_LONG - 1));
107 	offset %= BITS_PER_LONG;
108 
109 	while (1) {
110 		if (*p == 0)
111 			goto pass;
112 
113 		tmp = __reverse_ulong((unsigned char *)p);
114 
115 		tmp &= ~0UL >> offset;
116 		if (size < BITS_PER_LONG)
117 			tmp &= (~0UL << (BITS_PER_LONG - size));
118 		if (tmp)
119 			goto found;
120 pass:
121 		if (size <= BITS_PER_LONG)
122 			break;
123 		size -= BITS_PER_LONG;
124 		offset = 0;
125 		p++;
126 	}
127 	return result;
128 found:
129 	return result - size + __reverse_ffs(tmp);
130 }
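/*
 * Usage sketch (cf. __check_sit_bitmap() and add_discard_addrs() below):
 *
 *	unsigned long *map = (unsigned long *)se->cur_valid_map;
 *	unsigned long pos = __find_rev_next_bit(map,
 *					sbi->blocks_per_seg, offset);
 *
 * finds the next valid block at or after @offset inside one segment's
 * validity bitmap, using f2fs's reversed bit order.
 */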
131 
132 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
133 			unsigned long size, unsigned long offset)
134 {
135 	const unsigned long *p = addr + BIT_WORD(offset);
136 	unsigned long result = size;
137 	unsigned long tmp;
138 
139 	if (offset >= size)
140 		return size;
141 
142 	size -= (offset & ~(BITS_PER_LONG - 1));
143 	offset %= BITS_PER_LONG;
144 
145 	while (1) {
146 		if (*p == ~0UL)
147 			goto pass;
148 
149 		tmp = __reverse_ulong((unsigned char *)p);
150 
151 		if (offset)
152 			tmp |= ~0UL << (BITS_PER_LONG - offset);
153 		if (size < BITS_PER_LONG)
154 			tmp |= ~0UL >> size;
155 		if (tmp != ~0UL)
156 			goto found;
157 pass:
158 		if (size <= BITS_PER_LONG)
159 			break;
160 		size -= BITS_PER_LONG;
161 		offset = 0;
162 		p++;
163 	}
164 	return result;
165 found:
166 	return result - size + __reverse_ffz(tmp);
167 }
168 
169 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
170 {
171 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
172 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
173 	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
174 
175 	if (f2fs_lfs_mode(sbi))
176 		return false;
177 	if (sbi->gc_mode == GC_URGENT_HIGH)
178 		return true;
179 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
180 		return true;
181 
182 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
183 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
184 }
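/*
 * Worked example with hypothetical numbers: node_secs = 3, dent_secs = 1,
 * imeta_secs = 0, min_ssr_sections = 2 and reserved_sections() = 5 give
 * a threshold of 3 + 2*1 + 0 + 2 + 5 = 12, so SSR is needed once
 * free_sections(sbi) drops to 12 or fewer.
 */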
185 
186 void f2fs_register_inmem_page(struct inode *inode, struct page *page)
187 {
188 	struct inmem_pages *new;
189 
190 	f2fs_trace_pid(page);
191 
192 	f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
193 
194 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
195 
196 	/* add atomic page indices to the list */
197 	new->page = page;
198 	INIT_LIST_HEAD(&new->list);
199 
200 	/* increase reference count with clean state */
201 	get_page(page);
202 	mutex_lock(&F2FS_I(inode)->inmem_lock);
203 	list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
204 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
205 	mutex_unlock(&F2FS_I(inode)->inmem_lock);
206 
207 	trace_f2fs_register_inmem_page(page, INMEM);
208 }
209 
210 static int __revoke_inmem_pages(struct inode *inode,
211 				struct list_head *head, bool drop, bool recover,
212 				bool trylock)
213 {
214 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
215 	struct inmem_pages *cur, *tmp;
216 	int err = 0;
217 
218 	list_for_each_entry_safe(cur, tmp, head, list) {
219 		struct page *page = cur->page;
220 
221 		if (drop)
222 			trace_f2fs_commit_inmem_page(page, INMEM_DROP);
223 
224 		if (trylock) {
225 			/*
226 			 * to avoid deadlock in between page lock and
227 			 * to avoid a deadlock between the page lock and
228 			 * inmem_lock.
229 			if (!trylock_page(page))
230 				continue;
231 		} else {
232 			lock_page(page);
233 		}
234 
235 		f2fs_wait_on_page_writeback(page, DATA, true, true);
236 
237 		if (recover) {
238 			struct dnode_of_data dn;
239 			struct node_info ni;
240 
241 			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
242 retry:
243 			set_new_dnode(&dn, inode, NULL, NULL, 0);
244 			err = f2fs_get_dnode_of_data(&dn, page->index,
245 								LOOKUP_NODE);
246 			if (err) {
247 				if (err == -ENOMEM) {
248 					congestion_wait(BLK_RW_ASYNC,
249 							DEFAULT_IO_TIMEOUT);
250 					cond_resched();
251 					goto retry;
252 				}
253 				err = -EAGAIN;
254 				goto next;
255 			}
256 
257 			err = f2fs_get_node_info(sbi, dn.nid, &ni);
258 			if (err) {
259 				f2fs_put_dnode(&dn);
260 				return err;
261 			}
262 
263 			if (cur->old_addr == NEW_ADDR) {
264 				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
265 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
266 			} else
267 				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
268 					cur->old_addr, ni.version, true, true);
269 			f2fs_put_dnode(&dn);
270 		}
271 next:
272 		/* we don't need to invalidate this in the successful status */
273 		if (drop || recover) {
274 			ClearPageUptodate(page);
275 			clear_cold_data(page);
276 		}
277 		f2fs_clear_page_private(page);
278 		f2fs_put_page(page, 1);
279 
280 		list_del(&cur->list);
281 		kmem_cache_free(inmem_entry_slab, cur);
282 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
283 	}
284 	return err;
285 }
286 
287 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
288 {
289 	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
290 	struct inode *inode;
291 	struct f2fs_inode_info *fi;
292 	unsigned int count = sbi->atomic_files;
293 	unsigned int looped = 0;
294 next:
295 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
296 	if (list_empty(head)) {
297 		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
298 		return;
299 	}
300 	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
301 	inode = igrab(&fi->vfs_inode);
302 	if (inode)
303 		list_move_tail(&fi->inmem_ilist, head);
304 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
305 
306 	if (inode) {
307 		if (gc_failure) {
308 			if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
309 				goto skip;
310 		}
311 		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
312 		f2fs_drop_inmem_pages(inode);
313 skip:
314 		iput(inode);
315 	}
316 	congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
317 	cond_resched();
318 	if (gc_failure) {
319 		if (++looped >= count)
320 			return;
321 	}
322 	goto next;
323 }
324 
325 void f2fs_drop_inmem_pages(struct inode *inode)
326 {
327 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
328 	struct f2fs_inode_info *fi = F2FS_I(inode);
329 
330 	while (!list_empty(&fi->inmem_pages)) {
331 		mutex_lock(&fi->inmem_lock);
332 		__revoke_inmem_pages(inode, &fi->inmem_pages,
333 						true, false, true);
334 		mutex_unlock(&fi->inmem_lock);
335 	}
336 
337 	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
338 
339 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
340 	if (!list_empty(&fi->inmem_ilist))
341 		list_del_init(&fi->inmem_ilist);
342 	if (f2fs_is_atomic_file(inode)) {
343 		clear_inode_flag(inode, FI_ATOMIC_FILE);
344 		sbi->atomic_files--;
345 	}
346 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
347 }
348 
349 void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
350 {
351 	struct f2fs_inode_info *fi = F2FS_I(inode);
352 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
353 	struct list_head *head = &fi->inmem_pages;
354 	struct inmem_pages *cur = NULL;
355 
356 	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
357 
358 	mutex_lock(&fi->inmem_lock);
359 	list_for_each_entry(cur, head, list) {
360 		if (cur->page == page)
361 			break;
362 	}
363 
364 	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
365 	list_del(&cur->list);
366 	mutex_unlock(&fi->inmem_lock);
367 
368 	dec_page_count(sbi, F2FS_INMEM_PAGES);
369 	kmem_cache_free(inmem_entry_slab, cur);
370 
371 	ClearPageUptodate(page);
372 	f2fs_clear_page_private(page);
373 	f2fs_put_page(page, 0);
374 
375 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
376 }
377 
378 static int __f2fs_commit_inmem_pages(struct inode *inode)
379 {
380 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
381 	struct f2fs_inode_info *fi = F2FS_I(inode);
382 	struct inmem_pages *cur, *tmp;
383 	struct f2fs_io_info fio = {
384 		.sbi = sbi,
385 		.ino = inode->i_ino,
386 		.type = DATA,
387 		.op = REQ_OP_WRITE,
388 		.op_flags = REQ_SYNC | REQ_PRIO,
389 		.io_type = FS_DATA_IO,
390 	};
391 	struct list_head revoke_list;
392 	bool submit_bio = false;
393 	int err = 0;
394 
395 	INIT_LIST_HEAD(&revoke_list);
396 
397 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
398 		struct page *page = cur->page;
399 
400 		lock_page(page);
401 		if (page->mapping == inode->i_mapping) {
402 			trace_f2fs_commit_inmem_page(page, INMEM);
403 
404 			f2fs_wait_on_page_writeback(page, DATA, true, true);
405 
406 			set_page_dirty(page);
407 			if (clear_page_dirty_for_io(page)) {
408 				inode_dec_dirty_pages(inode);
409 				f2fs_remove_dirty_inode(inode);
410 			}
411 retry:
412 			fio.page = page;
413 			fio.old_blkaddr = NULL_ADDR;
414 			fio.encrypted_page = NULL;
415 			fio.need_lock = LOCK_DONE;
416 			err = f2fs_do_write_data_page(&fio);
417 			if (err) {
418 				if (err == -ENOMEM) {
419 					congestion_wait(BLK_RW_ASYNC,
420 							DEFAULT_IO_TIMEOUT);
421 					cond_resched();
422 					goto retry;
423 				}
424 				unlock_page(page);
425 				break;
426 			}
427 			/* record old blkaddr for revoking */
428 			cur->old_addr = fio.old_blkaddr;
429 			submit_bio = true;
430 		}
431 		unlock_page(page);
432 		list_move_tail(&cur->list, &revoke_list);
433 	}
434 
435 	if (submit_bio)
436 		f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
437 
438 	if (err) {
439 		/*
440 		 * try to revoke all committed pages; this could still fail
441 		 * due to lack of memory or some other reason. If that happens,
442 		 * EAGAIN will be returned, meaning the transaction has lost
443 		 * its integrity and the caller should use a journal to do the
444 		 * recovery, or rewrite and commit the last transaction. For any
445 		 * other error number, revoking was done by the filesystem itself.
446 		 */
447 		err = __revoke_inmem_pages(inode, &revoke_list,
448 						false, true, false);
449 
450 		/* drop all uncommitted pages */
451 		__revoke_inmem_pages(inode, &fi->inmem_pages,
452 						true, false, false);
453 	} else {
454 		__revoke_inmem_pages(inode, &revoke_list,
455 						false, false, false);
456 	}
457 
458 	return err;
459 }
460 
461 int f2fs_commit_inmem_pages(struct inode *inode)
462 {
463 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
464 	struct f2fs_inode_info *fi = F2FS_I(inode);
465 	int err;
466 
467 	f2fs_balance_fs(sbi, true);
468 
469 	down_write(&fi->i_gc_rwsem[WRITE]);
470 
471 	f2fs_lock_op(sbi);
472 	set_inode_flag(inode, FI_ATOMIC_COMMIT);
473 
474 	mutex_lock(&fi->inmem_lock);
475 	err = __f2fs_commit_inmem_pages(inode);
476 	mutex_unlock(&fi->inmem_lock);
477 
478 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
479 
480 	f2fs_unlock_op(sbi);
481 	up_write(&fi->i_gc_rwsem[WRITE]);
482 
483 	return err;
484 }
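/*
 * Note the lock ordering above: i_gc_rwsem[WRITE], then cp_rwsem via
 * f2fs_lock_op(), then inmem_lock. This keeps GC and checkpoint from
 * racing with an in-flight atomic commit.
 */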
485 
486 /*
487  * This function balances dirty node and dentry pages.
488  * In addition, it controls garbage collection.
489  */
490 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
491 {
492 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
493 		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
494 		f2fs_stop_checkpoint(sbi, false);
495 	}
496 
497 	/* balance_fs_bg() can be left pending */
498 	if (need && excess_cached_nats(sbi))
499 		f2fs_balance_fs_bg(sbi, false);
500 
501 	if (!f2fs_is_checkpoint_ready(sbi))
502 		return;
503 
504 	/*
505 	 * We should do GC, or end up with a checkpoint, if there are too many
506 	 * dirty dir/node pages without enough free segments.
507 	 */
508 	if (has_not_enough_free_secs(sbi, 0, 0)) {
509 		down_write(&sbi->gc_lock);
510 		f2fs_gc(sbi, false, false, NULL_SEGNO);
511 	}
512 }
513 
514 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
515 {
516 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
517 		return;
518 
519 	/* try to shrink extent cache when there is not enough memory */
520 	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
521 		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
522 
523 	/* check the # of cached NAT entries */
524 	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
525 		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
526 
527 	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
528 		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
529 	else
530 		f2fs_build_free_nids(sbi, false, false);
531 
532 	if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) ||
533 		excess_prefree_segs(sbi))
534 		goto do_sync;
535 
536 	/* there is inflight background IO or a recent foreground operation */
537 	if (is_inflight_io(sbi, REQ_TIME) ||
538 		(!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem)))
539 		return;
540 
541 	/* the periodic checkpoint timeout threshold has been exceeded */
542 	if (f2fs_time_over(sbi, CP_TIME))
543 		goto do_sync;
544 
545 	/* checkpoint is the only way to shrink partial cached entries */
546 	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
547 		f2fs_available_free_memory(sbi, INO_ENTRIES))
548 		return;
549 
550 do_sync:
551 	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
552 		struct blk_plug plug;
553 
554 		mutex_lock(&sbi->flush_lock);
555 
556 		blk_start_plug(&plug);
557 		f2fs_sync_dirty_inodes(sbi, FILE_INODE);
558 		blk_finish_plug(&plug);
559 
560 		mutex_unlock(&sbi->flush_lock);
561 	}
562 	f2fs_sync_fs(sbi->sb, true);
563 	stat_inc_bg_cp_count(sbi->stat_info);
564 }
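/*
 * In short, the do_sync path above fires a background checkpoint when
 * dirty NAT entries, dirty node pages or prefree segments pile up;
 * otherwise when the periodic CP_TIME threshold expires, or when the
 * cached NAT/INO entries can only be shrunk by a checkpoint, provided
 * the system is not busy with inflight IO.
 */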
565 
566 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
567 				struct block_device *bdev)
568 {
569 	struct bio *bio;
570 	int ret;
571 
572 	bio = f2fs_bio_alloc(sbi, 0, false);
573 	if (!bio)
574 		return -ENOMEM;
575 
576 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
577 	bio_set_dev(bio, bdev);
578 	ret = submit_bio_wait(bio);
579 	bio_put(bio);
580 
581 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
582 				test_opt(sbi, FLUSH_MERGE), ret);
583 	return ret;
584 }
585 
586 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
587 {
588 	int ret = 0;
589 	int i;
590 
591 	if (!f2fs_is_multi_device(sbi))
592 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
593 
594 	for (i = 0; i < sbi->s_ndevs; i++) {
595 		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
596 			continue;
597 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
598 		if (ret)
599 			break;
600 	}
601 	return ret;
602 }
603 
604 static int issue_flush_thread(void *data)
605 {
606 	struct f2fs_sb_info *sbi = data;
607 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
608 	wait_queue_head_t *q = &fcc->flush_wait_queue;
609 repeat:
610 	if (kthread_should_stop())
611 		return 0;
612 
613 	sb_start_intwrite(sbi->sb);
614 
615 	if (!llist_empty(&fcc->issue_list)) {
616 		struct flush_cmd *cmd, *next;
617 		int ret;
618 
619 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
620 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
621 
622 		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
623 
624 		ret = submit_flush_wait(sbi, cmd->ino);
625 		atomic_inc(&fcc->issued_flush);
626 
627 		llist_for_each_entry_safe(cmd, next,
628 					  fcc->dispatch_list, llnode) {
629 			cmd->ret = ret;
630 			complete(&cmd->wait);
631 		}
632 		fcc->dispatch_list = NULL;
633 	}
634 
635 	sb_end_intwrite(sbi->sb);
636 
637 	wait_event_interruptible(*q,
638 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
639 	goto repeat;
640 }
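/*
 * Note: llist_del_all() hands back the lock-free list in LIFO order,
 * which is why it is reversed before dispatch; one submit_flush_wait()
 * then completes every queued waiter with the shared result.
 */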
641 
642 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
643 {
644 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
645 	struct flush_cmd cmd;
646 	int ret;
647 
648 	if (test_opt(sbi, NOBARRIER))
649 		return 0;
650 
651 	if (!test_opt(sbi, FLUSH_MERGE)) {
652 		atomic_inc(&fcc->queued_flush);
653 		ret = submit_flush_wait(sbi, ino);
654 		atomic_dec(&fcc->queued_flush);
655 		atomic_inc(&fcc->issued_flush);
656 		return ret;
657 	}
658 
659 	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
660 	    f2fs_is_multi_device(sbi)) {
661 		ret = submit_flush_wait(sbi, ino);
662 		atomic_dec(&fcc->queued_flush);
663 
664 		atomic_inc(&fcc->issued_flush);
665 		return ret;
666 	}
667 
668 	cmd.ino = ino;
669 	init_completion(&cmd.wait);
670 
671 	llist_add(&cmd.llnode, &fcc->issue_list);
672 
673 	/* update issue_list before we wake up issue_flush thread */
674 	smp_mb();
675 
676 	if (waitqueue_active(&fcc->flush_wait_queue))
677 		wake_up(&fcc->flush_wait_queue);
678 
679 	if (fcc->f2fs_issue_flush) {
680 		wait_for_completion(&cmd.wait);
681 		atomic_dec(&fcc->queued_flush);
682 	} else {
683 		struct llist_node *list;
684 
685 		list = llist_del_all(&fcc->issue_list);
686 		if (!list) {
687 			wait_for_completion(&cmd.wait);
688 			atomic_dec(&fcc->queued_flush);
689 		} else {
690 			struct flush_cmd *tmp, *next;
691 
692 			ret = submit_flush_wait(sbi, ino);
693 
694 			llist_for_each_entry_safe(tmp, next, list, llnode) {
695 				if (tmp == &cmd) {
696 					cmd.ret = ret;
697 					atomic_dec(&fcc->queued_flush);
698 					continue;
699 				}
700 				tmp->ret = ret;
701 				complete(&tmp->wait);
702 			}
703 		}
704 	}
705 
706 	return cmd.ret;
707 }
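/*
 * Flush merging in a nutshell: with FLUSH_MERGE, the first waiter
 * (queued_flush == 1) issues the preflush itself, while later arrivals
 * park on fcc->issue_list and share one flush's result, so N concurrent
 * fsync() callers can be served by far fewer device cache flushes.
 */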
708 
709 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
710 {
711 	dev_t dev = sbi->sb->s_bdev->bd_dev;
712 	struct flush_cmd_control *fcc;
713 	int err = 0;
714 
715 	if (SM_I(sbi)->fcc_info) {
716 		fcc = SM_I(sbi)->fcc_info;
717 		if (fcc->f2fs_issue_flush)
718 			return err;
719 		goto init_thread;
720 	}
721 
722 	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
723 	if (!fcc)
724 		return -ENOMEM;
725 	atomic_set(&fcc->issued_flush, 0);
726 	atomic_set(&fcc->queued_flush, 0);
727 	init_waitqueue_head(&fcc->flush_wait_queue);
728 	init_llist_head(&fcc->issue_list);
729 	SM_I(sbi)->fcc_info = fcc;
730 	if (!test_opt(sbi, FLUSH_MERGE))
731 		return err;
732 
733 init_thread:
734 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
735 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
736 	if (IS_ERR(fcc->f2fs_issue_flush)) {
737 		err = PTR_ERR(fcc->f2fs_issue_flush);
738 		kfree(fcc);
739 		SM_I(sbi)->fcc_info = NULL;
740 		return err;
741 	}
742 
743 	return err;
744 }
745 
746 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
747 {
748 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
749 
750 	if (fcc && fcc->f2fs_issue_flush) {
751 		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
752 
753 		fcc->f2fs_issue_flush = NULL;
754 		kthread_stop(flush_thread);
755 	}
756 	if (free) {
757 		kfree(fcc);
758 		SM_I(sbi)->fcc_info = NULL;
759 	}
760 }
761 
762 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
763 {
764 	int ret = 0, i;
765 
766 	if (!f2fs_is_multi_device(sbi))
767 		return 0;
768 
769 	if (test_opt(sbi, NOBARRIER))
770 		return 0;
771 
772 	for (i = 1; i < sbi->s_ndevs; i++) {
773 		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
774 			continue;
775 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
776 		if (ret)
777 			break;
778 
779 		spin_lock(&sbi->dev_lock);
780 		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
781 		spin_unlock(&sbi->dev_lock);
782 	}
783 
784 	return ret;
785 }
786 
787 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
788 		enum dirty_type dirty_type)
789 {
790 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
791 
792 	/* need not be added */
793 	if (IS_CURSEG(sbi, segno))
794 		return;
795 
796 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
797 		dirty_i->nr_dirty[dirty_type]++;
798 
799 	if (dirty_type == DIRTY) {
800 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
801 		enum dirty_type t = sentry->type;
802 
803 		if (unlikely(t >= DIRTY)) {
804 			f2fs_bug_on(sbi, 1);
805 			return;
806 		}
807 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
808 			dirty_i->nr_dirty[t]++;
809 
810 		if (__is_large_section(sbi)) {
811 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
812 			block_t valid_blocks =
813 				get_valid_blocks(sbi, segno, true);
814 
815 			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
816 					valid_blocks == BLKS_PER_SEC(sbi)));
817 
818 			if (!IS_CURSEC(sbi, secno))
819 				set_bit(secno, dirty_i->dirty_secmap);
820 		}
821 	}
822 }
823 
824 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
825 		enum dirty_type dirty_type)
826 {
827 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
828 	block_t valid_blocks;
829 
830 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
831 		dirty_i->nr_dirty[dirty_type]--;
832 
833 	if (dirty_type == DIRTY) {
834 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
835 		enum dirty_type t = sentry->type;
836 
837 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
838 			dirty_i->nr_dirty[t]--;
839 
840 		valid_blocks = get_valid_blocks(sbi, segno, true);
841 		if (valid_blocks == 0) {
842 			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
843 						dirty_i->victim_secmap);
844 #ifdef CONFIG_F2FS_CHECK_FS
845 			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
846 #endif
847 		}
848 		if (__is_large_section(sbi)) {
849 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
850 
851 			if (!valid_blocks ||
852 					valid_blocks == BLKS_PER_SEC(sbi)) {
853 				clear_bit(secno, dirty_i->dirty_secmap);
854 				return;
855 			}
856 
857 			if (!IS_CURSEC(sbi, secno))
858 				set_bit(secno, dirty_i->dirty_secmap);
859 		}
860 	}
861 }
862 
863 /*
864  * Errors such as -ENOMEM should not occur here.
865  * Adding a dirty entry into the seglist is not a critical operation.
866  * If a given segment is one of current working segments, it won't be added.
867  */
868 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
869 {
870 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
871 	unsigned short valid_blocks, ckpt_valid_blocks;
872 	unsigned int usable_blocks;
873 
874 	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
875 		return;
876 
877 	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
878 	mutex_lock(&dirty_i->seglist_lock);
879 
880 	valid_blocks = get_valid_blocks(sbi, segno, false);
881 	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);
882 
883 	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
884 		ckpt_valid_blocks == usable_blocks)) {
885 		__locate_dirty_segment(sbi, segno, PRE);
886 		__remove_dirty_segment(sbi, segno, DIRTY);
887 	} else if (valid_blocks < usable_blocks) {
888 		__locate_dirty_segment(sbi, segno, DIRTY);
889 	} else {
890 		/* Recovery routine with SSR needs this */
891 		__remove_dirty_segment(sbi, segno, DIRTY);
892 	}
893 
894 	mutex_unlock(&dirty_i->seglist_lock);
895 }
896 
897 /* This moves currently empty dirty segments to prefree; seglist_lock is taken internally */
898 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
899 {
900 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
901 	unsigned int segno;
902 
903 	mutex_lock(&dirty_i->seglist_lock);
904 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
905 		if (get_valid_blocks(sbi, segno, false))
906 			continue;
907 		if (IS_CURSEG(sbi, segno))
908 			continue;
909 		__locate_dirty_segment(sbi, segno, PRE);
910 		__remove_dirty_segment(sbi, segno, DIRTY);
911 	}
912 	mutex_unlock(&dirty_i->seglist_lock);
913 }
914 
915 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
916 {
917 	int ovp_hole_segs =
918 		(overprovision_segments(sbi) - reserved_segments(sbi));
919 	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
920 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
921 	block_t holes[2] = {0, 0};	/* DATA and NODE */
922 	block_t unusable;
923 	struct seg_entry *se;
924 	unsigned int segno;
925 
926 	mutex_lock(&dirty_i->seglist_lock);
927 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
928 		se = get_seg_entry(sbi, segno);
929 		if (IS_NODESEG(se->type))
930 			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
931 							se->valid_blocks;
932 		else
933 			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
934 							se->valid_blocks;
935 	}
936 	mutex_unlock(&dirty_i->seglist_lock);
937 
938 	unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
939 	if (unusable > ovp_holes)
940 		return unusable - ovp_holes;
941 	return 0;
942 }
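/*
 * Example with hypothetical numbers: 8 overprovision segments, 3
 * reserved segments and log_blocks_per_seg == 9 give ovp_holes =
 * 5 << 9 = 2560 blocks; if the larger of the DATA/NODE holes is 3000
 * blocks, 3000 - 2560 = 440 blocks are reported as unusable.
 */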
943 
944 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
945 {
946 	int ovp_hole_segs =
947 		(overprovision_segments(sbi) - reserved_segments(sbi));
948 	if (unusable > F2FS_OPTION(sbi).unusable_cap)
949 		return -EAGAIN;
950 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
951 		dirty_segments(sbi) > ovp_hole_segs)
952 		return -EAGAIN;
953 	return 0;
954 }
955 
956 /* This is only used by SBI_CP_DISABLED */
957 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
958 {
959 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
960 	unsigned int segno = 0;
961 
962 	mutex_lock(&dirty_i->seglist_lock);
963 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
964 		if (get_valid_blocks(sbi, segno, false))
965 			continue;
966 		if (get_ckpt_valid_blocks(sbi, segno))
967 			continue;
968 		mutex_unlock(&dirty_i->seglist_lock);
969 		return segno;
970 	}
971 	mutex_unlock(&dirty_i->seglist_lock);
972 	return NULL_SEGNO;
973 }
974 
975 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
976 		struct block_device *bdev, block_t lstart,
977 		block_t start, block_t len)
978 {
979 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
980 	struct list_head *pend_list;
981 	struct discard_cmd *dc;
982 
983 	f2fs_bug_on(sbi, !len);
984 
985 	pend_list = &dcc->pend_list[plist_idx(len)];
986 
987 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
988 	INIT_LIST_HEAD(&dc->list);
989 	dc->bdev = bdev;
990 	dc->lstart = lstart;
991 	dc->start = start;
992 	dc->len = len;
993 	dc->ref = 0;
994 	dc->state = D_PREP;
995 	dc->queued = 0;
996 	dc->error = 0;
997 	init_completion(&dc->wait);
998 	list_add_tail(&dc->list, pend_list);
999 	spin_lock_init(&dc->lock);
1000 	dc->bio_ref = 0;
1001 	atomic_inc(&dcc->discard_cmd_cnt);
1002 	dcc->undiscard_blks += len;
1003 
1004 	return dc;
1005 }
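/*
 * New commands are bucketed into dcc->pend_list[] by length via
 * plist_idx(), so the issue paths below can walk the buckets from the
 * largest discards down and honor the granularity of the active policy.
 */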
1006 
1007 static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
1008 				struct block_device *bdev, block_t lstart,
1009 				block_t start, block_t len,
1010 				struct rb_node *parent, struct rb_node **p,
1011 				bool leftmost)
1012 {
1013 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1014 	struct discard_cmd *dc;
1015 
1016 	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1017 
1018 	rb_link_node(&dc->rb_node, parent, p);
1019 	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
1020 
1021 	return dc;
1022 }
1023 
1024 static void __detach_discard_cmd(struct discard_cmd_control *dcc,
1025 							struct discard_cmd *dc)
1026 {
1027 	if (dc->state == D_DONE)
1028 		atomic_sub(dc->queued, &dcc->queued_discard);
1029 
1030 	list_del(&dc->list);
1031 	rb_erase_cached(&dc->rb_node, &dcc->root);
1032 	dcc->undiscard_blks -= dc->len;
1033 
1034 	kmem_cache_free(discard_cmd_slab, dc);
1035 
1036 	atomic_dec(&dcc->discard_cmd_cnt);
1037 }
1038 
1039 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1040 							struct discard_cmd *dc)
1041 {
1042 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1043 	unsigned long flags;
1044 
1045 	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
1046 
1047 	spin_lock_irqsave(&dc->lock, flags);
1048 	if (dc->bio_ref) {
1049 		spin_unlock_irqrestore(&dc->lock, flags);
1050 		return;
1051 	}
1052 	spin_unlock_irqrestore(&dc->lock, flags);
1053 
1054 	f2fs_bug_on(sbi, dc->ref);
1055 
1056 	if (dc->error == -EOPNOTSUPP)
1057 		dc->error = 0;
1058 
1059 	if (dc->error)
1060 		printk_ratelimited(
1061 			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
1062 			KERN_INFO, sbi->sb->s_id,
1063 			dc->lstart, dc->start, dc->len, dc->error);
1064 	__detach_discard_cmd(dcc, dc);
1065 }
1066 
1067 static void f2fs_submit_discard_endio(struct bio *bio)
1068 {
1069 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
1070 	unsigned long flags;
1071 
1072 	spin_lock_irqsave(&dc->lock, flags);
1073 	if (!dc->error)
1074 		dc->error = blk_status_to_errno(bio->bi_status);
1075 	dc->bio_ref--;
1076 	if (!dc->bio_ref && dc->state == D_SUBMIT) {
1077 		dc->state = D_DONE;
1078 		complete_all(&dc->wait);
1079 	}
1080 	spin_unlock_irqrestore(&dc->lock, flags);
1081 	bio_put(bio);
1082 }
1083 
1084 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1085 				block_t start, block_t end)
1086 {
1087 #ifdef CONFIG_F2FS_CHECK_FS
1088 	struct seg_entry *sentry;
1089 	unsigned int segno;
1090 	block_t blk = start;
1091 	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
1092 	unsigned long *map;
1093 
1094 	while (blk < end) {
1095 		segno = GET_SEGNO(sbi, blk);
1096 		sentry = get_seg_entry(sbi, segno);
1097 		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1098 
1099 		if (end < START_BLOCK(sbi, segno + 1))
1100 			size = GET_BLKOFF_FROM_SEG0(sbi, end);
1101 		else
1102 			size = max_blocks;
1103 		map = (unsigned long *)(sentry->cur_valid_map);
1104 		offset = __find_rev_next_bit(map, size, offset);
1105 		f2fs_bug_on(sbi, offset != size);
1106 		blk = START_BLOCK(sbi, segno + 1);
1107 	}
1108 #endif
1109 }
1110 
1111 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1112 				struct discard_policy *dpolicy,
1113 				int discard_type, unsigned int granularity)
1114 {
1115 	/* common policy */
1116 	dpolicy->type = discard_type;
1117 	dpolicy->sync = true;
1118 	dpolicy->ordered = false;
1119 	dpolicy->granularity = granularity;
1120 
1121 	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
1122 	dpolicy->io_aware_gran = MAX_PLIST_NUM;
1123 	dpolicy->timeout = false;
1124 
1125 	if (discard_type == DPOLICY_BG) {
1126 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
1127 		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
1128 		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
1129 		dpolicy->io_aware = true;
1130 		dpolicy->sync = false;
1131 		dpolicy->ordered = true;
1132 		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
1133 			dpolicy->granularity = 1;
1134 			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
1135 		}
1136 	} else if (discard_type == DPOLICY_FORCE) {
1137 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
1138 		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
1139 		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
1140 		dpolicy->io_aware = false;
1141 	} else if (discard_type == DPOLICY_FSTRIM) {
1142 		dpolicy->io_aware = false;
1143 	} else if (discard_type == DPOLICY_UMOUNT) {
1144 		dpolicy->io_aware = false;
1145 		/* we need to issue all to keep CP_TRIMMED_FLAG */
1146 		dpolicy->granularity = 1;
1147 		dpolicy->timeout = true;
1148 	}
1149 }
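/*
 * Policy summary: DPOLICY_BG is async, ordered and io-aware, dropping
 * its granularity to 1 when utilization passes DEF_DISCARD_URGENT_UTIL;
 * DPOLICY_FORCE and DPOLICY_FSTRIM skip the idle-IO check; and
 * DPOLICY_UMOUNT issues everything with granularity 1 (under a timeout)
 * so that CP_TRIMMED_FLAG can be kept.
 */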
1150 
1151 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1152 				struct block_device *bdev, block_t lstart,
1153 				block_t start, block_t len);
1154 /* this function is copied from blkdev_issue_discard in block/blk-lib.c */
1155 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1156 						struct discard_policy *dpolicy,
1157 						struct discard_cmd *dc,
1158 						unsigned int *issued)
1159 {
1160 	struct block_device *bdev = dc->bdev;
1161 	struct request_queue *q = bdev_get_queue(bdev);
1162 	unsigned int max_discard_blocks =
1163 			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
1164 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1165 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1166 					&(dcc->fstrim_list) : &(dcc->wait_list);
1167 	int flag = dpolicy->sync ? REQ_SYNC : 0;
1168 	block_t lstart, start, len, total_len;
1169 	int err = 0;
1170 
1171 	if (dc->state != D_PREP)
1172 		return 0;
1173 
1174 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1175 		return 0;
1176 
1177 	trace_f2fs_issue_discard(bdev, dc->start, dc->len);
1178 
1179 	lstart = dc->lstart;
1180 	start = dc->start;
1181 	len = dc->len;
1182 	total_len = len;
1183 
1184 	dc->len = 0;
1185 
1186 	while (total_len && *issued < dpolicy->max_requests && !err) {
1187 		struct bio *bio = NULL;
1188 		unsigned long flags;
1189 		bool last = true;
1190 
1191 		if (len > max_discard_blocks) {
1192 			len = max_discard_blocks;
1193 			last = false;
1194 		}
1195 
1196 		(*issued)++;
1197 		if (*issued == dpolicy->max_requests)
1198 			last = true;
1199 
1200 		dc->len += len;
1201 
1202 		if (time_to_inject(sbi, FAULT_DISCARD)) {
1203 			f2fs_show_injection_info(sbi, FAULT_DISCARD);
1204 			err = -EIO;
1205 			goto submit;
1206 		}
1207 		err = __blkdev_issue_discard(bdev,
1208 					SECTOR_FROM_BLOCK(start),
1209 					SECTOR_FROM_BLOCK(len),
1210 					GFP_NOFS, 0, &bio);
1211 submit:
1212 		if (err) {
1213 			spin_lock_irqsave(&dc->lock, flags);
1214 			if (dc->state == D_PARTIAL)
1215 				dc->state = D_SUBMIT;
1216 			spin_unlock_irqrestore(&dc->lock, flags);
1217 
1218 			break;
1219 		}
1220 
1221 		f2fs_bug_on(sbi, !bio);
1222 
1223 		/*
1224 		 * the state must be updated before submission to avoid
1225 		 * reaching D_DONE right away
1226 		 */
1227 		spin_lock_irqsave(&dc->lock, flags);
1228 		if (last)
1229 			dc->state = D_SUBMIT;
1230 		else
1231 			dc->state = D_PARTIAL;
1232 		dc->bio_ref++;
1233 		spin_unlock_irqrestore(&dc->lock, flags);
1234 
1235 		atomic_inc(&dcc->queued_discard);
1236 		dc->queued++;
1237 		list_move_tail(&dc->list, wait_list);
1238 
1239 		/* sanity check on discard range */
1240 		__check_sit_bitmap(sbi, lstart, lstart + len);
1241 
1242 		bio->bi_private = dc;
1243 		bio->bi_end_io = f2fs_submit_discard_endio;
1244 		bio->bi_opf |= flag;
1245 		submit_bio(bio);
1246 
1247 		atomic_inc(&dcc->issued_discard);
1248 
1249 		f2fs_update_iostat(sbi, FS_DISCARD, 1);
1250 
1251 		lstart += len;
1252 		start += len;
1253 		total_len -= len;
1254 		len = total_len;
1255 	}
1256 
1257 	if (!err && len) {
1258 		dcc->undiscard_blks -= len;
1259 		__update_discard_tree_range(sbi, bdev, lstart, start, len);
1260 	}
1261 	return err;
1262 }
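/*
 * Splitting example: with max_discard_blocks == 1024 and dc->len ==
 * 2500, the loop above issues three bios of 1024, 1024 and 452 blocks;
 * the first two leave the command in D_PARTIAL and the last moves it
 * to D_SUBMIT, while dc->bio_ref makes the endio complete the command
 * only after all pieces have returned.
 */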
1263 
1264 static void __insert_discard_tree(struct f2fs_sb_info *sbi,
1265 				struct block_device *bdev, block_t lstart,
1266 				block_t start, block_t len,
1267 				struct rb_node **insert_p,
1268 				struct rb_node *insert_parent)
1269 {
1270 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1271 	struct rb_node **p;
1272 	struct rb_node *parent = NULL;
1273 	bool leftmost = true;
1274 
1275 	if (insert_p && insert_parent) {
1276 		parent = insert_parent;
1277 		p = insert_p;
1278 		goto do_insert;
1279 	}
1280 
1281 	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
1282 							lstart, &leftmost);
1283 do_insert:
1284 	__attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
1285 								p, leftmost);
1286 }
1287 
1288 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1289 						struct discard_cmd *dc)
1290 {
1291 	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
1292 }
1293 
1294 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1295 				struct discard_cmd *dc, block_t blkaddr)
1296 {
1297 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1298 	struct discard_info di = dc->di;
1299 	bool modified = false;
1300 
1301 	if (dc->state == D_DONE || dc->len == 1) {
1302 		__remove_discard_cmd(sbi, dc);
1303 		return;
1304 	}
1305 
1306 	dcc->undiscard_blks -= di.len;
1307 
1308 	if (blkaddr > di.lstart) {
1309 		dc->len = blkaddr - dc->lstart;
1310 		dcc->undiscard_blks += dc->len;
1311 		__relocate_discard_cmd(dcc, dc);
1312 		modified = true;
1313 	}
1314 
1315 	if (blkaddr < di.lstart + di.len - 1) {
1316 		if (modified) {
1317 			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
1318 					di.start + blkaddr + 1 - di.lstart,
1319 					di.lstart + di.len - 1 - blkaddr,
1320 					NULL, NULL);
1321 		} else {
1322 			dc->lstart++;
1323 			dc->len--;
1324 			dc->start++;
1325 			dcc->undiscard_blks += dc->len;
1326 			__relocate_discard_cmd(dcc, dc);
1327 		}
1328 	}
1329 }
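/*
 * Punch example: for a pending command covering lstart 100..109 and
 * blkaddr == 104, the command is trimmed to 100..103 (dc->len = 4) and
 * a new command is inserted for 105..109, leaving only block 104 out
 * of the discard range.
 */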
1330 
1331 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1332 				struct block_device *bdev, block_t lstart,
1333 				block_t start, block_t len)
1334 {
1335 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1336 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1337 	struct discard_cmd *dc;
1338 	struct discard_info di = {0};
1339 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1340 	struct request_queue *q = bdev_get_queue(bdev);
1341 	unsigned int max_discard_blocks =
1342 			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
1343 	block_t end = lstart + len;
1344 
1345 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1346 					NULL, lstart,
1347 					(struct rb_entry **)&prev_dc,
1348 					(struct rb_entry **)&next_dc,
1349 					&insert_p, &insert_parent, true, NULL);
1350 	if (dc)
1351 		prev_dc = dc;
1352 
1353 	if (!prev_dc) {
1354 		di.lstart = lstart;
1355 		di.len = next_dc ? next_dc->lstart - lstart : len;
1356 		di.len = min(di.len, len);
1357 		di.start = start;
1358 	}
1359 
1360 	while (1) {
1361 		struct rb_node *node;
1362 		bool merged = false;
1363 		struct discard_cmd *tdc = NULL;
1364 
1365 		if (prev_dc) {
1366 			di.lstart = prev_dc->lstart + prev_dc->len;
1367 			if (di.lstart < lstart)
1368 				di.lstart = lstart;
1369 			if (di.lstart >= end)
1370 				break;
1371 
1372 			if (!next_dc || next_dc->lstart > end)
1373 				di.len = end - di.lstart;
1374 			else
1375 				di.len = next_dc->lstart - di.lstart;
1376 			di.start = start + di.lstart - lstart;
1377 		}
1378 
1379 		if (!di.len)
1380 			goto next;
1381 
1382 		if (prev_dc && prev_dc->state == D_PREP &&
1383 			prev_dc->bdev == bdev &&
1384 			__is_discard_back_mergeable(&di, &prev_dc->di,
1385 							max_discard_blocks)) {
1386 			prev_dc->di.len += di.len;
1387 			dcc->undiscard_blks += di.len;
1388 			__relocate_discard_cmd(dcc, prev_dc);
1389 			di = prev_dc->di;
1390 			tdc = prev_dc;
1391 			merged = true;
1392 		}
1393 
1394 		if (next_dc && next_dc->state == D_PREP &&
1395 			next_dc->bdev == bdev &&
1396 			__is_discard_front_mergeable(&di, &next_dc->di,
1397 							max_discard_blocks)) {
1398 			next_dc->di.lstart = di.lstart;
1399 			next_dc->di.len += di.len;
1400 			next_dc->di.start = di.start;
1401 			dcc->undiscard_blks += di.len;
1402 			__relocate_discard_cmd(dcc, next_dc);
1403 			if (tdc)
1404 				__remove_discard_cmd(sbi, tdc);
1405 			merged = true;
1406 		}
1407 
1408 		if (!merged) {
1409 			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
1410 							di.len, NULL, NULL);
1411 		}
1412 next:
1413 		prev_dc = next_dc;
1414 		if (!prev_dc)
1415 			break;
1416 
1417 		node = rb_next(&prev_dc->rb_node);
1418 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1419 	}
1420 }
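/*
 * Merge example: a new extent ending exactly where a D_PREP neighbour
 * on the same bdev begins is front-merged into that neighbour, and if
 * it was back-merged as well the two tree nodes collapse into one,
 * provided the combined length stays within max_discard_blocks.
 */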
1421 
1422 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
1423 		struct block_device *bdev, block_t blkstart, block_t blklen)
1424 {
1425 	block_t lblkstart = blkstart;
1426 
1427 	if (!f2fs_bdev_support_discard(bdev))
1428 		return 0;
1429 
1430 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
1431 
1432 	if (f2fs_is_multi_device(sbi)) {
1433 		int devi = f2fs_target_device_index(sbi, blkstart);
1434 
1435 		blkstart -= FDEV(devi).start_blk;
1436 	}
1437 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1438 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1439 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1440 	return 0;
1441 }
1442 
1443 static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1444 					struct discard_policy *dpolicy)
1445 {
1446 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1447 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1448 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1449 	struct discard_cmd *dc;
1450 	struct blk_plug plug;
1451 	unsigned int pos = dcc->next_pos;
1452 	unsigned int issued = 0;
1453 	bool io_interrupted = false;
1454 
1455 	mutex_lock(&dcc->cmd_lock);
1456 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1457 					NULL, pos,
1458 					(struct rb_entry **)&prev_dc,
1459 					(struct rb_entry **)&next_dc,
1460 					&insert_p, &insert_parent, true, NULL);
1461 	if (!dc)
1462 		dc = next_dc;
1463 
1464 	blk_start_plug(&plug);
1465 
1466 	while (dc) {
1467 		struct rb_node *node;
1468 		int err = 0;
1469 
1470 		if (dc->state != D_PREP)
1471 			goto next;
1472 
1473 		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1474 			io_interrupted = true;
1475 			break;
1476 		}
1477 
1478 		dcc->next_pos = dc->lstart + dc->len;
1479 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1480 
1481 		if (issued >= dpolicy->max_requests)
1482 			break;
1483 next:
1484 		node = rb_next(&dc->rb_node);
1485 		if (err)
1486 			__remove_discard_cmd(sbi, dc);
1487 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1488 	}
1489 
1490 	blk_finish_plug(&plug);
1491 
1492 	if (!dc)
1493 		dcc->next_pos = 0;
1494 
1495 	mutex_unlock(&dcc->cmd_lock);
1496 
1497 	if (!issued && io_interrupted)
1498 		issued = -1;
1499 
1500 	return issued;
1501 }
1502 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1503 					struct discard_policy *dpolicy);
1504 
1505 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1506 					struct discard_policy *dpolicy)
1507 {
1508 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1509 	struct list_head *pend_list;
1510 	struct discard_cmd *dc, *tmp;
1511 	struct blk_plug plug;
1512 	int i, issued;
1513 	bool io_interrupted = false;
1514 
1515 	if (dpolicy->timeout)
1516 		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1517 
1518 retry:
1519 	issued = 0;
1520 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1521 		if (dpolicy->timeout &&
1522 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1523 			break;
1524 
1525 		if (i + 1 < dpolicy->granularity)
1526 			break;
1527 
1528 		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
1529 			return __issue_discard_cmd_orderly(sbi, dpolicy);
1530 
1531 		pend_list = &dcc->pend_list[i];
1532 
1533 		mutex_lock(&dcc->cmd_lock);
1534 		if (list_empty(pend_list))
1535 			goto next;
1536 		if (unlikely(dcc->rbtree_check))
1537 			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
1538 							&dcc->root, false));
1539 		blk_start_plug(&plug);
1540 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1541 			f2fs_bug_on(sbi, dc->state != D_PREP);
1542 
1543 			if (dpolicy->timeout &&
1544 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1545 				break;
1546 
1547 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1548 						!is_idle(sbi, DISCARD_TIME)) {
1549 				io_interrupted = true;
1550 				break;
1551 			}
1552 
1553 			__submit_discard_cmd(sbi, dpolicy, dc, &issued);
1554 
1555 			if (issued >= dpolicy->max_requests)
1556 				break;
1557 		}
1558 		blk_finish_plug(&plug);
1559 next:
1560 		mutex_unlock(&dcc->cmd_lock);
1561 
1562 		if (issued >= dpolicy->max_requests || io_interrupted)
1563 			break;
1564 	}
1565 
1566 	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1567 		__wait_all_discard_cmd(sbi, dpolicy);
1568 		goto retry;
1569 	}
1570 
1571 	if (!issued && io_interrupted)
1572 		issued = -1;
1573 
1574 	return issued;
1575 }
1576 
1577 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1578 {
1579 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1580 	struct list_head *pend_list;
1581 	struct discard_cmd *dc, *tmp;
1582 	int i;
1583 	bool dropped = false;
1584 
1585 	mutex_lock(&dcc->cmd_lock);
1586 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1587 		pend_list = &dcc->pend_list[i];
1588 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1589 			f2fs_bug_on(sbi, dc->state != D_PREP);
1590 			__remove_discard_cmd(sbi, dc);
1591 			dropped = true;
1592 		}
1593 	}
1594 	mutex_unlock(&dcc->cmd_lock);
1595 
1596 	return dropped;
1597 }
1598 
1599 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1600 {
1601 	__drop_discard_cmd(sbi);
1602 }
1603 
1604 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1605 							struct discard_cmd *dc)
1606 {
1607 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1608 	unsigned int len = 0;
1609 
1610 	wait_for_completion_io(&dc->wait);
1611 	mutex_lock(&dcc->cmd_lock);
1612 	f2fs_bug_on(sbi, dc->state != D_DONE);
1613 	dc->ref--;
1614 	if (!dc->ref) {
1615 		if (!dc->error)
1616 			len = dc->len;
1617 		__remove_discard_cmd(sbi, dc);
1618 	}
1619 	mutex_unlock(&dcc->cmd_lock);
1620 
1621 	return len;
1622 }
1623 
1624 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1625 						struct discard_policy *dpolicy,
1626 						block_t start, block_t end)
1627 {
1628 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1629 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1630 					&(dcc->fstrim_list) : &(dcc->wait_list);
1631 	struct discard_cmd *dc, *tmp;
1632 	bool need_wait;
1633 	unsigned int trimmed = 0;
1634 
1635 next:
1636 	need_wait = false;
1637 
1638 	mutex_lock(&dcc->cmd_lock);
1639 	list_for_each_entry_safe(dc, tmp, wait_list, list) {
1640 		if (dc->lstart + dc->len <= start || end <= dc->lstart)
1641 			continue;
1642 		if (dc->len < dpolicy->granularity)
1643 			continue;
1644 		if (dc->state == D_DONE && !dc->ref) {
1645 			wait_for_completion_io(&dc->wait);
1646 			if (!dc->error)
1647 				trimmed += dc->len;
1648 			__remove_discard_cmd(sbi, dc);
1649 		} else {
1650 			dc->ref++;
1651 			need_wait = true;
1652 			break;
1653 		}
1654 	}
1655 	mutex_unlock(&dcc->cmd_lock);
1656 
1657 	if (need_wait) {
1658 		trimmed += __wait_one_discard_bio(sbi, dc);
1659 		goto next;
1660 	}
1661 
1662 	return trimmed;
1663 }
1664 
1665 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1666 						struct discard_policy *dpolicy)
1667 {
1668 	struct discard_policy dp;
1669 	unsigned int discard_blks;
1670 
1671 	if (dpolicy)
1672 		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1673 
1674 	/* wait all */
1675 	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
1676 	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1677 	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
1678 	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1679 
1680 	return discard_blks;
1681 }
1682 
1683 /* This should be covered by the global mutex, &sit_i->sentry_lock */
1684 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1685 {
1686 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1687 	struct discard_cmd *dc;
1688 	bool need_wait = false;
1689 
1690 	mutex_lock(&dcc->cmd_lock);
1691 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
1692 							NULL, blkaddr);
1693 	if (dc) {
1694 		if (dc->state == D_PREP) {
1695 			__punch_discard_cmd(sbi, dc, blkaddr);
1696 		} else {
1697 			dc->ref++;
1698 			need_wait = true;
1699 		}
1700 	}
1701 	mutex_unlock(&dcc->cmd_lock);
1702 
1703 	if (need_wait)
1704 		__wait_one_discard_bio(sbi, dc);
1705 }
1706 
1707 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1708 {
1709 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1710 
1711 	if (dcc && dcc->f2fs_issue_discard) {
1712 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1713 
1714 		dcc->f2fs_issue_discard = NULL;
1715 		kthread_stop(discard_thread);
1716 	}
1717 }
1718 
1719 /* This comes from f2fs_put_super */
1720 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1721 {
1722 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1723 	struct discard_policy dpolicy;
1724 	bool dropped;
1725 
1726 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1727 					dcc->discard_granularity);
1728 	__issue_discard_cmd(sbi, &dpolicy);
1729 	dropped = __drop_discard_cmd(sbi);
1730 
1731 	/* just to make sure there are no pending discard commands */
1732 	__wait_all_discard_cmd(sbi, NULL);
1733 
1734 	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1735 	return dropped;
1736 }
1737 
1738 static int issue_discard_thread(void *data)
1739 {
1740 	struct f2fs_sb_info *sbi = data;
1741 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1742 	wait_queue_head_t *q = &dcc->discard_wait_queue;
1743 	struct discard_policy dpolicy;
1744 	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
1745 	int issued;
1746 
1747 	set_freezable();
1748 
1749 	do {
1750 		__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1751 					dcc->discard_granularity);
1752 
1753 		wait_event_interruptible_timeout(*q,
1754 				kthread_should_stop() || freezing(current) ||
1755 				dcc->discard_wake,
1756 				msecs_to_jiffies(wait_ms));
1757 
1758 		if (dcc->discard_wake)
1759 			dcc->discard_wake = 0;
1760 
1761 		/* clean up pending candidates before going to sleep */
1762 		if (atomic_read(&dcc->queued_discard))
1763 			__wait_all_discard_cmd(sbi, NULL);
1764 
1765 		if (try_to_freeze())
1766 			continue;
1767 		if (f2fs_readonly(sbi->sb))
1768 			continue;
1769 		if (kthread_should_stop())
1770 			return 0;
1771 		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1772 			wait_ms = dpolicy.max_interval;
1773 			continue;
1774 		}
1775 
1776 		if (sbi->gc_mode == GC_URGENT_HIGH)
1777 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
1778 
1779 		sb_start_intwrite(sbi->sb);
1780 
1781 		issued = __issue_discard_cmd(sbi, &dpolicy);
1782 		if (issued > 0) {
1783 			__wait_all_discard_cmd(sbi, &dpolicy);
1784 			wait_ms = dpolicy.min_interval;
1785 		} else if (issued == -1) {
1786 			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1787 			if (!wait_ms)
1788 				wait_ms = dpolicy.mid_interval;
1789 		} else {
1790 			wait_ms = dpolicy.max_interval;
1791 		}
1792 
1793 		sb_end_intwrite(sbi->sb);
1794 
1795 	} while (!kthread_should_stop());
1796 	return 0;
1797 }
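/*
 * The thread adapts its sleep above: min_interval after useful work,
 * mid_interval (or the remaining idle window) when IO preempted it,
 * and max_interval when nothing was issued or fsck is needed.
 */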
1798 
1799 #ifdef CONFIG_BLK_DEV_ZONED
1800 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1801 		struct block_device *bdev, block_t blkstart, block_t blklen)
1802 {
1803 	sector_t sector, nr_sects;
1804 	block_t lblkstart = blkstart;
1805 	int devi = 0;
1806 
1807 	if (f2fs_is_multi_device(sbi)) {
1808 		devi = f2fs_target_device_index(sbi, blkstart);
1809 		if (blkstart < FDEV(devi).start_blk ||
1810 		    blkstart > FDEV(devi).end_blk) {
1811 			f2fs_err(sbi, "Invalid block %x", blkstart);
1812 			return -EIO;
1813 		}
1814 		blkstart -= FDEV(devi).start_blk;
1815 	}
1816 
1817 	/* For sequential zones, reset the zone write pointer */
1818 	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1819 		sector = SECTOR_FROM_BLOCK(blkstart);
1820 		nr_sects = SECTOR_FROM_BLOCK(blklen);
1821 
1822 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
1823 				nr_sects != bdev_zone_sectors(bdev)) {
1824 			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1825 				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1826 				 blkstart, blklen);
1827 			return -EIO;
1828 		}
1829 		trace_f2fs_issue_reset_zone(bdev, blkstart);
1830 		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1831 					sector, nr_sects, GFP_NOFS);
1832 	}
1833 
1834 	/* For conventional zones, use regular discard if supported */
1835 	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1836 }
1837 #endif
1838 
1839 static int __issue_discard_async(struct f2fs_sb_info *sbi,
1840 		struct block_device *bdev, block_t blkstart, block_t blklen)
1841 {
1842 #ifdef CONFIG_BLK_DEV_ZONED
1843 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
1844 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1845 #endif
1846 	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
1847 }
1848 
1849 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
1850 				block_t blkstart, block_t blklen)
1851 {
1852 	sector_t start = blkstart, len = 0;
1853 	struct block_device *bdev;
1854 	struct seg_entry *se;
1855 	unsigned int offset;
1856 	block_t i;
1857 	int err = 0;
1858 
1859 	bdev = f2fs_target_device(sbi, blkstart, NULL);
1860 
1861 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
1862 		if (i != start) {
1863 			struct block_device *bdev2 =
1864 				f2fs_target_device(sbi, i, NULL);
1865 
1866 			if (bdev2 != bdev) {
1867 				err = __issue_discard_async(sbi, bdev,
1868 						start, len);
1869 				if (err)
1870 					return err;
1871 				bdev = bdev2;
1872 				start = i;
1873 				len = 0;
1874 			}
1875 		}
1876 
1877 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
1878 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
1879 
1880 		if (!f2fs_test_and_set_bit(offset, se->discard_map))
1881 			sbi->discard_blks--;
1882 	}
1883 
1884 	if (len)
1885 		err = __issue_discard_async(sbi, bdev, start, len);
1886 	return err;
1887 }
1888 
1889 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1890 							bool check_only)
1891 {
1892 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1893 	int max_blocks = sbi->blocks_per_seg;
1894 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
1895 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1896 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1897 	unsigned long *discard_map = (unsigned long *)se->discard_map;
1898 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
1899 	unsigned int start = 0, end = -1;
1900 	bool force = (cpc->reason & CP_DISCARD);
1901 	struct discard_entry *de = NULL;
1902 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
1903 	int i;
1904 
1905 	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
1906 		return false;
1907 
1908 	if (!force) {
1909 		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
1910 			SM_I(sbi)->dcc_info->nr_discards >=
1911 				SM_I(sbi)->dcc_info->max_discards)
1912 			return false;
1913 	}
1914 
1915 	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1916 	for (i = 0; i < entries; i++)
1917 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
1918 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
1919 
1920 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
1921 				SM_I(sbi)->dcc_info->max_discards) {
1922 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1923 		if (start >= max_blocks)
1924 			break;
1925 
1926 		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
1927 		if (force && start && end != max_blocks
1928 					&& (end - start) < cpc->trim_minlen)
1929 			continue;
1930 
1931 		if (check_only)
1932 			return true;
1933 
1934 		if (!de) {
1935 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
1936 								GFP_F2FS_ZERO);
1937 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1938 			list_add_tail(&de->list, head);
1939 		}
1940 
1941 		for (i = start; i < end; i++)
1942 			__set_bit_le(i, (void *)de->discard_map);
1943 
1944 		SM_I(sbi)->dcc_info->nr_discards += end - start;
1945 	}
1946 	return false;
1947 }
1948 
1949 static void release_discard_addr(struct discard_entry *entry)
1950 {
1951 	list_del(&entry->list);
1952 	kmem_cache_free(discard_entry_slab, entry);
1953 }
1954 
1955 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
1956 {
1957 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
1958 	struct discard_entry *entry, *this;
1959 
1960 	/* drop caches */
1961 	list_for_each_entry_safe(entry, this, head, list)
1962 		release_discard_addr(entry);
1963 }
1964 
1965 /*
1966  * f2fs_clear_prefree_segments() should be called after the checkpoint is done.
1967  */
1968 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
1969 {
1970 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1971 	unsigned int segno;
1972 
1973 	mutex_lock(&dirty_i->seglist_lock);
1974 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
1975 		__set_test_and_free(sbi, segno, false);
1976 	mutex_unlock(&dirty_i->seglist_lock);
1977 }
1978 
1979 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
1980 						struct cp_control *cpc)
1981 {
1982 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1983 	struct list_head *head = &dcc->entry_list;
1984 	struct discard_entry *entry, *this;
1985 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1986 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
1987 	unsigned int start = 0, end = -1;
1988 	unsigned int secno, start_segno;
1989 	bool force = (cpc->reason & CP_DISCARD);
1990 	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
1991 
1992 	mutex_lock(&dirty_i->seglist_lock);
1993 
1994 	while (1) {
1995 		int i;
1996 
1997 		if (need_align && end != -1)
1998 			end--;
1999 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2000 		if (start >= MAIN_SEGS(sbi))
2001 			break;
2002 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2003 								start + 1);
2004 
2005 		if (need_align) {
2006 			start = rounddown(start, sbi->segs_per_sec);
2007 			end = roundup(end, sbi->segs_per_sec);
2008 		}
2009 
2010 		for (i = start; i < end; i++) {
2011 			if (test_and_clear_bit(i, prefree_map))
2012 				dirty_i->nr_dirty[PRE]--;
2013 		}
2014 
2015 		if (!f2fs_realtime_discard_enable(sbi))
2016 			continue;
2017 
2018 		if (force && start >= cpc->trim_start &&
2019 					(end - 1) <= cpc->trim_end)
2020 			continue;
2021 
2022 		if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
2023 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2024 				(end - start) << sbi->log_blocks_per_seg);
2025 			continue;
2026 		}
2027 next:
2028 		secno = GET_SEC_FROM_SEG(sbi, start);
2029 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
2030 		if (!IS_CURSEC(sbi, secno) &&
2031 			!get_valid_blocks(sbi, start, true))
2032 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2033 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
2034 
2035 		start = start_segno + sbi->segs_per_sec;
2036 		if (start < end)
2037 			goto next;
2038 		else
2039 			end = start - 1;
2040 	}
2041 	mutex_unlock(&dirty_i->seglist_lock);
2042 
2043 	/* send small discards */
2044 	list_for_each_entry_safe(entry, this, head, list) {
2045 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2046 		bool is_valid = test_bit_le(0, entry->discard_map);
2047 
2048 find_next:
2049 		if (is_valid) {
2050 			next_pos = find_next_zero_bit_le(entry->discard_map,
2051 					sbi->blocks_per_seg, cur_pos);
2052 			len = next_pos - cur_pos;
2053 
2054 			if (f2fs_sb_has_blkzoned(sbi) ||
2055 			    (force && len < cpc->trim_minlen))
2056 				goto skip;
2057 
2058 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2059 									len);
2060 			total_len += len;
2061 		} else {
2062 			next_pos = find_next_bit_le(entry->discard_map,
2063 					sbi->blocks_per_seg, cur_pos);
2064 		}
2065 skip:
2066 		cur_pos = next_pos;
2067 		is_valid = !is_valid;
2068 
2069 		if (cur_pos < sbi->blocks_per_seg)
2070 			goto find_next;
2071 
2072 		release_discard_addr(entry);
2073 		dcc->nr_discards -= total_len;
2074 	}
2075 
2076 	wake_up_discard_thread(sbi, false);
2077 }
2078 
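/*
 * Set up the discard command control structure and start the background
 * "f2fs_discard-%u:%u" issue thread.  If dcc_info already exists, only
 * the thread needs to be (re)started.
 */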
2079 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2080 {
2081 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2082 	struct discard_cmd_control *dcc;
2083 	int err = 0, i;
2084 
2085 	if (SM_I(sbi)->dcc_info) {
2086 		dcc = SM_I(sbi)->dcc_info;
2087 		goto init_thread;
2088 	}
2089 
2090 	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2091 	if (!dcc)
2092 		return -ENOMEM;
2093 
2094 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2095 	INIT_LIST_HEAD(&dcc->entry_list);
2096 	for (i = 0; i < MAX_PLIST_NUM; i++)
2097 		INIT_LIST_HEAD(&dcc->pend_list[i]);
2098 	INIT_LIST_HEAD(&dcc->wait_list);
2099 	INIT_LIST_HEAD(&dcc->fstrim_list);
2100 	mutex_init(&dcc->cmd_lock);
2101 	atomic_set(&dcc->issued_discard, 0);
2102 	atomic_set(&dcc->queued_discard, 0);
2103 	atomic_set(&dcc->discard_cmd_cnt, 0);
2104 	dcc->nr_discards = 0;
2105 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2106 	dcc->undiscard_blks = 0;
2107 	dcc->next_pos = 0;
2108 	dcc->root = RB_ROOT_CACHED;
2109 	dcc->rbtree_check = false;
2110 
2111 	init_waitqueue_head(&dcc->discard_wait_queue);
2112 	SM_I(sbi)->dcc_info = dcc;
2113 init_thread:
2114 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2115 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2116 	if (IS_ERR(dcc->f2fs_issue_discard)) {
2117 		err = PTR_ERR(dcc->f2fs_issue_discard);
2118 		kfree(dcc);
2119 		SM_I(sbi)->dcc_info = NULL;
2120 		return err;
2121 	}
2122 
2123 	return err;
2124 }
2125 
2126 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2127 {
2128 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2129 
2130 	if (!dcc)
2131 		return;
2132 
2133 	f2fs_stop_discard_thread(sbi);
2134 
2135 	/*
2136 	 * Recovery can cache discard commands, so in the error path of
2137 	 * fill_super(), we need to give them a chance to be issued.
2138 	 */
2139 	if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
2140 		f2fs_issue_discard_timeout(sbi);
2141 
2142 	kfree(dcc);
2143 	SM_I(sbi)->dcc_info = NULL;
2144 }
2145 
2146 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2147 {
2148 	struct sit_info *sit_i = SIT_I(sbi);
2149 
2150 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2151 		sit_i->dirty_sentries++;
2152 		return false;
2153 	}
2154 
2155 	return true;
2156 }
2157 
2158 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2159 					unsigned int segno, int modified)
2160 {
2161 	struct seg_entry *se = get_seg_entry(sbi, segno);
2162 	se->type = type;
2163 	if (modified)
2164 		__mark_sit_entry_dirty(sbi, segno);
2165 }
2166 
2167 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2168 								block_t blkaddr)
2169 {
2170 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2171 
2172 	if (segno == NULL_SEGNO)
2173 		return 0;
2174 	return get_seg_entry(sbi, segno)->mtime;
2175 }
2176 
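/*
 * Fold a new modification time into the segment that covers @blkaddr.
 * The segment mtime is maintained as a running average weighted by the
 * current valid block count:
 *
 *	mtime = (mtime * valid_blocks + new_mtime) / (valid_blocks + 1)
 */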
2177 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2178 						unsigned long long old_mtime)
2179 {
2180 	struct seg_entry *se;
2181 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2182 	unsigned long long ctime = get_mtime(sbi, false);
2183 	unsigned long long mtime = old_mtime ? old_mtime : ctime;
2184 
2185 	if (segno == NULL_SEGNO)
2186 		return;
2187 
2188 	se = get_seg_entry(sbi, segno);
2189 
2190 	if (!se->mtime)
2191 		se->mtime = mtime;
2192 	else
2193 		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2194 						se->valid_blocks + 1);
2195 
2196 	if (ctime > SIT_I(sbi)->max_mtime)
2197 		SIT_I(sbi)->max_mtime = ctime;
2198 }
2199 
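/*
 * Update the SIT entry of the segment that covers @blkaddr: adjust the
 * valid block count by @del (+1 when a block is allocated, -1 when it is
 * invalidated), keep the current/checkpoint/discard bitmaps consistent,
 * and mark the entry dirty so it is flushed at the next checkpoint.
 */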
2200 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2201 {
2202 	struct seg_entry *se;
2203 	unsigned int segno, offset;
2204 	long int new_vblocks;
2205 	bool exist;
2206 #ifdef CONFIG_F2FS_CHECK_FS
2207 	bool mir_exist;
2208 #endif
2209 
2210 	segno = GET_SEGNO(sbi, blkaddr);
2211 
2212 	se = get_seg_entry(sbi, segno);
2213 	new_vblocks = se->valid_blocks + del;
2214 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2215 
2216 	f2fs_bug_on(sbi, (new_vblocks < 0 ||
2217 			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2218 
2219 	se->valid_blocks = new_vblocks;
2220 
2221 	/* Update valid block bitmap */
2222 	if (del > 0) {
2223 		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2224 #ifdef CONFIG_F2FS_CHECK_FS
2225 		mir_exist = f2fs_test_and_set_bit(offset,
2226 						se->cur_valid_map_mir);
2227 		if (unlikely(exist != mir_exist)) {
2228 			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2229 				 blkaddr, exist);
2230 			f2fs_bug_on(sbi, 1);
2231 		}
2232 #endif
2233 		if (unlikely(exist)) {
2234 			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2235 				 blkaddr);
2236 			f2fs_bug_on(sbi, 1);
2237 			se->valid_blocks--;
2238 			del = 0;
2239 		}
2240 
2241 		if (!f2fs_test_and_set_bit(offset, se->discard_map))
2242 			sbi->discard_blks--;
2243 
2244 		/*
2245 		 * SSR should never reuse a block which is checkpointed
2246 		 * or newly invalidated.
2247 		 */
2248 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2249 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2250 				se->ckpt_valid_blocks++;
2251 		}
2252 	} else {
2253 		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2254 #ifdef CONFIG_F2FS_CHECK_FS
2255 		mir_exist = f2fs_test_and_clear_bit(offset,
2256 						se->cur_valid_map_mir);
2257 		if (unlikely(exist != mir_exist)) {
2258 			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2259 				 blkaddr, exist);
2260 			f2fs_bug_on(sbi, 1);
2261 		}
2262 #endif
2263 		if (unlikely(!exist)) {
2264 			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2265 				 blkaddr);
2266 			f2fs_bug_on(sbi, 1);
2267 			se->valid_blocks++;
2268 			del = 0;
2269 		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2270 			/*
2271 			 * If checkpoints are off, we must not reuse data that
2272 			 * was used in the previous checkpoint. If it was used
2273 			 * before, we must track that to know how much space we
2274 			 * really have.
2275 			 */
2276 			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2277 				spin_lock(&sbi->stat_lock);
2278 				sbi->unusable_block_count++;
2279 				spin_unlock(&sbi->stat_lock);
2280 			}
2281 		}
2282 
2283 		if (f2fs_test_and_clear_bit(offset, se->discard_map))
2284 			sbi->discard_blks++;
2285 	}
2286 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2287 		se->ckpt_valid_blocks += del;
2288 
2289 	__mark_sit_entry_dirty(sbi, segno);
2290 
2291 	/* update total number of valid blocks to be written in ckpt area */
2292 	SIT_I(sbi)->written_valid_blocks += del;
2293 
2294 	if (__is_large_section(sbi))
2295 		get_sec_entry(sbi, segno)->valid_blocks += del;
2296 }
2297 
2298 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2299 {
2300 	unsigned int segno = GET_SEGNO(sbi, addr);
2301 	struct sit_info *sit_i = SIT_I(sbi);
2302 
2303 	f2fs_bug_on(sbi, addr == NULL_ADDR);
2304 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2305 		return;
2306 
2307 	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
2308 
2309 	/* add it into sit main buffer */
2310 	down_write(&sit_i->sentry_lock);
2311 
2312 	update_segment_mtime(sbi, addr, 0);
2313 	update_sit_entry(sbi, addr, -1);
2314 
2315 	/* add it into dirty seglist */
2316 	locate_dirty_segment(sbi, segno);
2317 
2318 	up_write(&sit_i->sentry_lock);
2319 }
2320 
2321 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2322 {
2323 	struct sit_info *sit_i = SIT_I(sbi);
2324 	unsigned int segno, offset;
2325 	struct seg_entry *se;
2326 	bool is_cp = false;
2327 
2328 	if (!__is_valid_data_blkaddr(blkaddr))
2329 		return true;
2330 
2331 	down_read(&sit_i->sentry_lock);
2332 
2333 	segno = GET_SEGNO(sbi, blkaddr);
2334 	se = get_seg_entry(sbi, segno);
2335 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2336 
2337 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2338 		is_cp = true;
2339 
2340 	up_read(&sit_i->sentry_lock);
2341 
2342 	return is_cp;
2343 }
2344 
2345 /*
2346  * This function must be called while holding the curseg_mutex lock.
2347  */
2348 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
2349 					struct f2fs_summary *sum)
2350 {
2351 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2352 	void *addr = curseg->sum_blk;
2353 	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
2354 	memcpy(addr, sum, sizeof(struct f2fs_summary));
2355 }
2356 
2357 /*
2358  * Calculate the number of current summary pages for writing
2359  */
2360 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2361 {
2362 	int valid_sum_count = 0;
2363 	int i, sum_in_page;
2364 
2365 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2366 		if (sbi->ckpt->alloc_type[i] == SSR)
2367 			valid_sum_count += sbi->blocks_per_seg;
2368 		else {
2369 			if (for_ra)
2370 				valid_sum_count += le16_to_cpu(
2371 					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2372 			else
2373 				valid_sum_count += curseg_blkoff(sbi, i);
2374 		}
2375 	}
2376 
2377 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
2378 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2379 	if (valid_sum_count <= sum_in_page)
2380 		return 1;
2381 	else if ((valid_sum_count - sum_in_page) <=
2382 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2383 		return 2;
2384 	return 3;
2385 }
2386 
2387 /*
2388  * Caller should put this summary page
2389  */
2390 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2391 {
2392 	if (unlikely(f2fs_cp_error(sbi)))
2393 		return ERR_PTR(-EIO);
2394 	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2395 }
2396 
2397 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2398 					void *src, block_t blk_addr)
2399 {
2400 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2401 
2402 	memcpy(page_address(page), src, PAGE_SIZE);
2403 	set_page_dirty(page);
2404 	f2fs_put_page(page, 1);
2405 }
2406 
2407 static void write_sum_page(struct f2fs_sb_info *sbi,
2408 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2409 {
2410 	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2411 }
2412 
2413 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2414 						int type, block_t blk_addr)
2415 {
2416 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2417 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2418 	struct f2fs_summary_block *src = curseg->sum_blk;
2419 	struct f2fs_summary_block *dst;
2420 
2421 	dst = (struct f2fs_summary_block *)page_address(page);
2422 	memset(dst, 0, PAGE_SIZE);
2423 
2424 	mutex_lock(&curseg->curseg_mutex);
2425 
2426 	down_read(&curseg->journal_rwsem);
2427 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2428 	up_read(&curseg->journal_rwsem);
2429 
2430 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2431 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2432 
2433 	mutex_unlock(&curseg->curseg_mutex);
2434 
2435 	set_page_dirty(page);
2436 	f2fs_put_page(page, 1);
2437 }
2438 
2439 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2440 				struct curseg_info *curseg, int type)
2441 {
2442 	unsigned int segno = curseg->segno + 1;
2443 	struct free_segmap_info *free_i = FREE_I(sbi);
2444 
2445 	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2446 		return !test_bit(segno, free_i->free_segmap);
2447 	return 0;
2448 }
2449 
2450 /*
2451  * Find a new segment in the free segment bitmap, in the right allocation
2452  * order. This function always succeeds; anything else is a BUG.
2453  */
2454 static void get_new_segment(struct f2fs_sb_info *sbi,
2455 			unsigned int *newseg, bool new_sec, int dir)
2456 {
2457 	struct free_segmap_info *free_i = FREE_I(sbi);
2458 	unsigned int segno, secno, zoneno;
2459 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2460 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2461 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2462 	unsigned int left_start = hint;
2463 	bool init = true;
2464 	int go_left = 0;
2465 	int i;
2466 
2467 	spin_lock(&free_i->segmap_lock);
2468 
2469 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2470 		segno = find_next_zero_bit(free_i->free_segmap,
2471 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2472 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2473 			goto got_it;
2474 	}
2475 find_other_zone:
2476 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2477 	if (secno >= MAIN_SECS(sbi)) {
2478 		if (dir == ALLOC_RIGHT) {
2479 			secno = find_next_zero_bit(free_i->free_secmap,
2480 							MAIN_SECS(sbi), 0);
2481 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2482 		} else {
2483 			go_left = 1;
2484 			left_start = hint - 1;
2485 		}
2486 	}
2487 	if (go_left == 0)
2488 		goto skip_left;
2489 
2490 	while (test_bit(left_start, free_i->free_secmap)) {
2491 		if (left_start > 0) {
2492 			left_start--;
2493 			continue;
2494 		}
2495 		left_start = find_next_zero_bit(free_i->free_secmap,
2496 							MAIN_SECS(sbi), 0);
2497 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2498 		break;
2499 	}
2500 	secno = left_start;
2501 skip_left:
2502 	segno = GET_SEG_FROM_SEC(sbi, secno);
2503 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2504 
2505 	/* give up on finding another zone */
2506 	if (!init)
2507 		goto got_it;
2508 	if (sbi->secs_per_zone == 1)
2509 		goto got_it;
2510 	if (zoneno == old_zoneno)
2511 		goto got_it;
2512 	if (dir == ALLOC_LEFT) {
2513 		if (!go_left && zoneno + 1 >= total_zones)
2514 			goto got_it;
2515 		if (go_left && zoneno == 0)
2516 			goto got_it;
2517 	}
2518 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2519 		if (CURSEG_I(sbi, i)->zone == zoneno)
2520 			break;
2521 
2522 	if (i < NR_CURSEG_TYPE) {
2523 		/* zone is in use, try another */
2524 		if (go_left)
2525 			hint = zoneno * sbi->secs_per_zone - 1;
2526 		else if (zoneno + 1 >= total_zones)
2527 			hint = 0;
2528 		else
2529 			hint = (zoneno + 1) * sbi->secs_per_zone;
2530 		init = false;
2531 		goto find_other_zone;
2532 	}
2533 got_it:
2534 	/* set it as dirty segment in free segmap */
2535 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2536 	__set_inuse(sbi, segno);
2537 	*newseg = segno;
2538 	spin_unlock(&free_i->segmap_lock);
2539 }
2540 
2541 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2542 {
2543 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2544 	struct summary_footer *sum_footer;
2545 	unsigned short seg_type = curseg->seg_type;
2546 
2547 	curseg->inited = true;
2548 	curseg->segno = curseg->next_segno;
2549 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2550 	curseg->next_blkoff = 0;
2551 	curseg->next_segno = NULL_SEGNO;
2552 
2553 	sum_footer = &(curseg->sum_blk->footer);
2554 	memset(sum_footer, 0, sizeof(struct summary_footer));
2555 
2556 	sanity_check_seg_type(sbi, seg_type);
2557 
2558 	if (IS_DATASEG(seg_type))
2559 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2560 	if (IS_NODESEG(seg_type))
2561 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2562 	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2563 }
2564 
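/*
 * Pick the segment number that get_new_segment() will use as its search
 * hint, based on section size, checkpoint state, the NOHEAP option and
 * the configured allocation mode.
 */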
2565 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2566 {
2567 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2568 	unsigned short seg_type = curseg->seg_type;
2569 
2570 	sanity_check_seg_type(sbi, seg_type);
2571 
2572 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
2573 	if (__is_large_section(sbi))
2574 		return curseg->segno;
2575 
2576 	/* an in-memory log may not be located on any segment after mount */
2577 	if (!curseg->inited)
2578 		return 0;
2579 
2580 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2581 		return 0;
2582 
2583 	if (test_opt(sbi, NOHEAP) &&
2584 		(seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
2585 		return 0;
2586 
2587 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2588 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2589 
2590 	/* find segments from 0 to reuse freed segments */
2591 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2592 		return 0;
2593 
2594 	return curseg->segno;
2595 }
2596 
2597 /*
2598  * Allocate a current working segment.
2599  * This function always allocates a free segment in the LFS manner.
2600  */
2601 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2602 {
2603 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2604 	unsigned short seg_type = curseg->seg_type;
2605 	unsigned int segno = curseg->segno;
2606 	int dir = ALLOC_LEFT;
2607 
2608 	if (curseg->inited)
2609 		write_sum_page(sbi, curseg->sum_blk,
2610 				GET_SUM_BLOCK(sbi, segno));
2611 	if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
2612 		dir = ALLOC_RIGHT;
2613 
2614 	if (test_opt(sbi, NOHEAP))
2615 		dir = ALLOC_RIGHT;
2616 
2617 	segno = __get_next_segno(sbi, type);
2618 	get_new_segment(sbi, &segno, new_sec, dir);
2619 	curseg->next_segno = segno;
2620 	reset_curseg(sbi, type, 1);
2621 	curseg->alloc_type = LFS;
2622 }
2623 
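/*
 * For SSR allocation: find the first block offset at or after @start that
 * is free in both the current and the checkpoint valid bitmaps, so that
 * checkpointed blocks are never overwritten in place.
 */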
2624 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
2625 			struct curseg_info *seg, block_t start)
2626 {
2627 	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
2628 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2629 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2630 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2631 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2632 	int i, pos;
2633 
2634 	for (i = 0; i < entries; i++)
2635 		target_map[i] = ckpt_map[i] | cur_map[i];
2636 
2637 	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2638 
2639 	seg->next_blkoff = pos;
2640 }
2641 
2642 /*
2643  * If a segment is written in the LFS manner, the next block offset is simply
2644  * the current block offset plus one. However, if a segment is written in the
2645  * SSR manner, the next block offset is obtained by calling __next_free_blkoff().
2646  */
2647 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2648 				struct curseg_info *seg)
2649 {
2650 	if (seg->alloc_type == SSR)
2651 		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
2652 	else
2653 		seg->next_blkoff++;
2654 }
2655 
2656 /*
2657  * This function always allocates a used segment (from the dirty seglist) in
2658  * the SSR manner, so it must recover the existing summary of its valid blocks.
2659  */
2660 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
2661 {
2662 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2663 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2664 	unsigned int new_segno = curseg->next_segno;
2665 	struct f2fs_summary_block *sum_node;
2666 	struct page *sum_page;
2667 
2668 	if (flush)
2669 		write_sum_page(sbi, curseg->sum_blk,
2670 					GET_SUM_BLOCK(sbi, curseg->segno));
2671 
2672 	__set_test_and_inuse(sbi, new_segno);
2673 
2674 	mutex_lock(&dirty_i->seglist_lock);
2675 	__remove_dirty_segment(sbi, new_segno, PRE);
2676 	__remove_dirty_segment(sbi, new_segno, DIRTY);
2677 	mutex_unlock(&dirty_i->seglist_lock);
2678 
2679 	reset_curseg(sbi, type, 1);
2680 	curseg->alloc_type = SSR;
2681 	__next_free_blkoff(sbi, curseg, 0);
2682 
2683 	sum_page = f2fs_get_sum_page(sbi, new_segno);
2684 	if (IS_ERR(sum_page)) {
2685 		/* GC won't be able to use stale summary pages by cp_error */
2686 		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2687 		return;
2688 	}
2689 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2690 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2691 	f2fs_put_page(sum_page, 1);
2692 }
2693 
2694 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2695 				int alloc_mode, unsigned long long age);
2696 
2697 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2698 					int target_type, int alloc_mode,
2699 					unsigned long long age)
2700 {
2701 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2702 
2703 	curseg->seg_type = target_type;
2704 
2705 	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2706 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2707 
2708 		curseg->seg_type = se->type;
2709 		change_curseg(sbi, type, true);
2710 	} else {
2711 		/* allocate cold segment by default */
2712 		curseg->seg_type = CURSEG_COLD_DATA;
2713 		new_curseg(sbi, type, true);
2714 	}
2715 	stat_inc_seg_type(sbi, curseg);
2716 }
2717 
2718 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2719 {
2720 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2721 
2722 	if (!sbi->am.atgc_enabled)
2723 		return;
2724 
2725 	down_read(&SM_I(sbi)->curseg_lock);
2726 
2727 	mutex_lock(&curseg->curseg_mutex);
2728 	down_write(&SIT_I(sbi)->sentry_lock);
2729 
2730 	get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2731 
2732 	up_write(&SIT_I(sbi)->sentry_lock);
2733 	mutex_unlock(&curseg->curseg_mutex);
2734 
2735 	up_read(&SM_I(sbi)->curseg_lock);
2736 }
2737 
2738 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2739 {
2740 	__f2fs_init_atgc_curseg(sbi);
2741 }
2742 
2743 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2744 {
2745 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2746 
2747 	mutex_lock(&curseg->curseg_mutex);
2748 	if (!curseg->inited)
2749 		goto out;
2750 
2751 	if (get_valid_blocks(sbi, curseg->segno, false)) {
2752 		write_sum_page(sbi, curseg->sum_blk,
2753 				GET_SUM_BLOCK(sbi, curseg->segno));
2754 	} else {
2755 		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2756 		__set_test_and_free(sbi, curseg->segno, true);
2757 		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2758 	}
2759 out:
2760 	mutex_unlock(&curseg->curseg_mutex);
2761 }
2762 
2763 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2764 {
2765 	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2766 
2767 	if (sbi->am.atgc_enabled)
2768 		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2769 }
2770 
2771 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2772 {
2773 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2774 
2775 	mutex_lock(&curseg->curseg_mutex);
2776 	if (!curseg->inited)
2777 		goto out;
2778 	if (get_valid_blocks(sbi, curseg->segno, false))
2779 		goto out;
2780 
2781 	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2782 	__set_test_and_inuse(sbi, curseg->segno);
2783 	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2784 out:
2785 	mutex_unlock(&curseg->curseg_mutex);
2786 }
2787 
2788 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2789 {
2790 	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2791 
2792 	if (sbi->am.atgc_enabled)
2793 		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2794 }
2795 
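/*
 * Select a partially valid segment to reuse for SSR allocation.  Try a
 * victim of the requested type first, then the other node/data
 * temperatures, and finally, when checkpointing is disabled, any dirty
 * segment that has no valid blocks left.  Returns 1 on success.
 */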
2796 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2797 				int alloc_mode, unsigned long long age)
2798 {
2799 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2800 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2801 	unsigned segno = NULL_SEGNO;
2802 	unsigned short seg_type = curseg->seg_type;
2803 	int i, cnt;
2804 	bool reversed = false;
2805 
2806 	sanity_check_seg_type(sbi, seg_type);
2807 
2808 	/* f2fs_need_SSR() already forces us to do this */
2809 	if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
2810 		curseg->next_segno = segno;
2811 		return 1;
2812 	}
2813 
2814 	/* For node segments, let's do SSR more intensively */
2815 	if (IS_NODESEG(seg_type)) {
2816 		if (seg_type >= CURSEG_WARM_NODE) {
2817 			reversed = true;
2818 			i = CURSEG_COLD_NODE;
2819 		} else {
2820 			i = CURSEG_HOT_NODE;
2821 		}
2822 		cnt = NR_CURSEG_NODE_TYPE;
2823 	} else {
2824 		if (seg_type >= CURSEG_WARM_DATA) {
2825 			reversed = true;
2826 			i = CURSEG_COLD_DATA;
2827 		} else {
2828 			i = CURSEG_HOT_DATA;
2829 		}
2830 		cnt = NR_CURSEG_DATA_TYPE;
2831 	}
2832 
2833 	for (; cnt-- > 0; reversed ? i-- : i++) {
2834 		if (i == seg_type)
2835 			continue;
2836 		if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
2837 			curseg->next_segno = segno;
2838 			return 1;
2839 		}
2840 	}
2841 
2842 	/* find valid_blocks=0 in dirty list */
2843 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2844 		segno = get_free_segment(sbi);
2845 		if (segno != NULL_SEGNO) {
2846 			curseg->next_segno = segno;
2847 			return 1;
2848 		}
2849 	}
2850 	return 0;
2851 }
2852 
2853 /*
2854  * Flush out the current segment and replace it with a new one.
2855  * This function always succeeds; anything else is a BUG.
2856  */
2857 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2858 						int type, bool force)
2859 {
2860 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2861 
2862 	if (force)
2863 		new_curseg(sbi, type, true);
2864 	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2865 					curseg->seg_type == CURSEG_WARM_NODE)
2866 		new_curseg(sbi, type, false);
2867 	else if (curseg->alloc_type == LFS &&
2868 			is_next_segment_free(sbi, curseg, type) &&
2869 			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2870 		new_curseg(sbi, type, false);
2871 	else if (f2fs_need_SSR(sbi) &&
2872 			get_ssr_segment(sbi, type, SSR, 0))
2873 		change_curseg(sbi, type, true);
2874 	else
2875 		new_curseg(sbi, type, false);
2876 
2877 	stat_inc_seg_type(sbi, curseg);
2878 }
2879 
2880 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
2881 					unsigned int start, unsigned int end)
2882 {
2883 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2884 	unsigned int segno;
2885 
2886 	down_read(&SM_I(sbi)->curseg_lock);
2887 	mutex_lock(&curseg->curseg_mutex);
2888 	down_write(&SIT_I(sbi)->sentry_lock);
2889 
2890 	segno = CURSEG_I(sbi, type)->segno;
2891 	if (segno < start || segno > end)
2892 		goto unlock;
2893 
2894 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
2895 		change_curseg(sbi, type, true);
2896 	else
2897 		new_curseg(sbi, type, true);
2898 
2899 	stat_inc_seg_type(sbi, curseg);
2900 
2901 	locate_dirty_segment(sbi, segno);
2902 unlock:
2903 	up_write(&SIT_I(sbi)->sentry_lock);
2904 
2905 	if (segno != curseg->segno)
2906 		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
2907 			    type, segno, curseg->segno);
2908 
2909 	mutex_unlock(&curseg->curseg_mutex);
2910 	up_read(&SM_I(sbi)->curseg_lock);
2911 }
2912 
2913 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
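/*
 * Move the log of @type onto a fresh segment, unless the current one has
 * never been written to and holds no valid blocks in memory or in the
 * last checkpoint.
 */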
2914 {
2915 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2916 	unsigned int old_segno;
2917 
2918 	if (!curseg->inited)
2919 		goto alloc;
2920 
2921 	if (!curseg->next_blkoff &&
2922 		!get_valid_blocks(sbi, curseg->segno, false) &&
2923 		!get_ckpt_valid_blocks(sbi, curseg->segno))
2924 		return;
2925 
2926 alloc:
2927 	old_segno = curseg->segno;
2928 	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
2929 	locate_dirty_segment(sbi, old_segno);
2930 }
2931 
2932 void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type)
2933 {
2934 	down_write(&SIT_I(sbi)->sentry_lock);
2935 	__allocate_new_segment(sbi, type);
2936 	up_write(&SIT_I(sbi)->sentry_lock);
2937 }
2938 
2939 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
2940 {
2941 	int i;
2942 
2943 	down_write(&SIT_I(sbi)->sentry_lock);
2944 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
2945 		__allocate_new_segment(sbi, i);
2946 	up_write(&SIT_I(sbi)->sentry_lock);
2947 }
2948 
2949 static const struct segment_allocation default_salloc_ops = {
2950 	.allocate_segment = allocate_segment_by_default,
2951 };
2952 
2953 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
2954 						struct cp_control *cpc)
2955 {
2956 	__u64 trim_start = cpc->trim_start;
2957 	bool has_candidate = false;
2958 
2959 	down_write(&SIT_I(sbi)->sentry_lock);
2960 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2961 		if (add_discard_addrs(sbi, cpc, true)) {
2962 			has_candidate = true;
2963 			break;
2964 		}
2965 	}
2966 	up_write(&SIT_I(sbi)->sentry_lock);
2967 
2968 	cpc->trim_start = trim_start;
2969 	return has_candidate;
2970 }
2971 
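/*
 * Issue the pending discard commands that overlap [start, end] on behalf
 * of fstrim.  Commands shorter than the policy granularity are skipped,
 * and after every dpolicy->max_requests submissions cmd_lock is dropped
 * and issued commands are waited for, so other users are not starved.
 */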
2972 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
2973 					struct discard_policy *dpolicy,
2974 					unsigned int start, unsigned int end)
2975 {
2976 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2977 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
2978 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
2979 	struct discard_cmd *dc;
2980 	struct blk_plug plug;
2981 	int issued;
2982 	unsigned int trimmed = 0;
2983 
2984 next:
2985 	issued = 0;
2986 
2987 	mutex_lock(&dcc->cmd_lock);
2988 	if (unlikely(dcc->rbtree_check))
2989 		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
2990 							&dcc->root, false));
2991 
2992 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
2993 					NULL, start,
2994 					(struct rb_entry **)&prev_dc,
2995 					(struct rb_entry **)&next_dc,
2996 					&insert_p, &insert_parent, true, NULL);
2997 	if (!dc)
2998 		dc = next_dc;
2999 
3000 	blk_start_plug(&plug);
3001 
3002 	while (dc && dc->lstart <= end) {
3003 		struct rb_node *node;
3004 		int err = 0;
3005 
3006 		if (dc->len < dpolicy->granularity)
3007 			goto skip;
3008 
3009 		if (dc->state != D_PREP) {
3010 			list_move_tail(&dc->list, &dcc->fstrim_list);
3011 			goto skip;
3012 		}
3013 
3014 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3015 
3016 		if (issued >= dpolicy->max_requests) {
3017 			start = dc->lstart + dc->len;
3018 
3019 			if (err)
3020 				__remove_discard_cmd(sbi, dc);
3021 
3022 			blk_finish_plug(&plug);
3023 			mutex_unlock(&dcc->cmd_lock);
3024 			trimmed += __wait_all_discard_cmd(sbi, NULL);
3025 			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
3026 			goto next;
3027 		}
3028 skip:
3029 		node = rb_next(&dc->rb_node);
3030 		if (err)
3031 			__remove_discard_cmd(sbi, dc);
3032 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3033 
3034 		if (fatal_signal_pending(current))
3035 			break;
3036 	}
3037 
3038 	blk_finish_plug(&plug);
3039 	mutex_unlock(&dcc->cmd_lock);
3040 
3041 	return trimmed;
3042 }
3043 
3044 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3045 {
3046 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
3047 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3048 	unsigned int start_segno, end_segno;
3049 	block_t start_block, end_block;
3050 	struct cp_control cpc;
3051 	struct discard_policy dpolicy;
3052 	unsigned long long trimmed = 0;
3053 	int err = 0;
3054 	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3055 
3056 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3057 		return -EINVAL;
3058 
3059 	if (end < MAIN_BLKADDR(sbi))
3060 		goto out;
3061 
3062 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3063 		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3064 		return -EFSCORRUPTED;
3065 	}
3066 
3067 	/* start/end segment number in main_area */
3068 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3069 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3070 						GET_SEGNO(sbi, end);
3071 	if (need_align) {
3072 		start_segno = rounddown(start_segno, sbi->segs_per_sec);
3073 		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
3074 	}
3075 
3076 	cpc.reason = CP_DISCARD;
3077 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3078 	cpc.trim_start = start_segno;
3079 	cpc.trim_end = end_segno;
3080 
3081 	if (sbi->discard_blks == 0)
3082 		goto out;
3083 
3084 	down_write(&sbi->gc_lock);
3085 	err = f2fs_write_checkpoint(sbi, &cpc);
3086 	up_write(&sbi->gc_lock);
3087 	if (err)
3088 		goto out;
3089 
3090 	/*
3091 	 * We filed discard candidates, but we don't actually need to wait for
3092 	 * all of them: with the runtime discard option enabled, they will be
3093 	 * issued during idle time anyway. Such a configuration suggests the user
3094 	 * relies on runtime discard or periodic fstrim rather than on this pass.
3095 	 */
3096 	if (f2fs_realtime_discard_enable(sbi))
3097 		goto out;
3098 
3099 	start_block = START_BLOCK(sbi, start_segno);
3100 	end_block = START_BLOCK(sbi, end_segno + 1);
3101 
3102 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3103 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3104 					start_block, end_block);
3105 
3106 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3107 					start_block, end_block);
3108 out:
3109 	if (!err)
3110 		range->len = F2FS_BLK_TO_BYTES(trimmed);
3111 	return err;
3112 }
3113 
3114 static bool __has_curseg_space(struct f2fs_sb_info *sbi,
3115 					struct curseg_info *curseg)
3116 {
3117 	return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
3118 							curseg->segno);
3119 }
3120 
3121 int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3122 {
3123 	switch (hint) {
3124 	case WRITE_LIFE_SHORT:
3125 		return CURSEG_HOT_DATA;
3126 	case WRITE_LIFE_EXTREME:
3127 		return CURSEG_COLD_DATA;
3128 	default:
3129 		return CURSEG_WARM_DATA;
3130 	}
3131 }
3132 
3133 /* This returns write hints for each segment type. These hints will be
3134  * passed down to the block layer. There are mapping tables which depend on
3135  * the mount option 'whint_mode'.
3136  *
3137  * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
3138  *
3139  * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
3140  *
3141  * User                  F2FS                     Block
3142  * ----                  ----                     -----
3143  *                       META                     WRITE_LIFE_NOT_SET
3144  *                       HOT_NODE                 "
3145  *                       WARM_NODE                "
3146  *                       COLD_NODE                "
3147  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
3148  * extension list        "                        "
3149  *
3150  * -- buffered io
3151  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3152  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3153  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3154  * WRITE_LIFE_NONE       "                        "
3155  * WRITE_LIFE_MEDIUM     "                        "
3156  * WRITE_LIFE_LONG       "                        "
3157  *
3158  * -- direct io
3159  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3160  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3161  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3162  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
3163  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
3164  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
3165  *
3166  * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
3167  *
3168  * User                  F2FS                     Block
3169  * ----                  ----                     -----
3170  *                       META                     WRITE_LIFE_MEDIUM;
3171  *                       HOT_NODE                 WRITE_LIFE_NOT_SET
3172  *                       WARM_NODE                "
3173  *                       COLD_NODE                WRITE_LIFE_NONE
3174  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
3175  * extension list        "                        "
3176  *
3177  * -- buffered io
3178  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3179  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3180  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_LONG
3181  * WRITE_LIFE_NONE       "                        "
3182  * WRITE_LIFE_MEDIUM     "                        "
3183  * WRITE_LIFE_LONG       "                        "
3184  *
3185  * -- direct io
3186  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3187  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3188  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3189  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
3190  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
3191  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
3192  */
3193 
3194 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3195 				enum page_type type, enum temp_type temp)
3196 {
3197 	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
3198 		if (type == DATA) {
3199 			if (temp == WARM)
3200 				return WRITE_LIFE_NOT_SET;
3201 			else if (temp == HOT)
3202 				return WRITE_LIFE_SHORT;
3203 			else if (temp == COLD)
3204 				return WRITE_LIFE_EXTREME;
3205 		} else {
3206 			return WRITE_LIFE_NOT_SET;
3207 		}
3208 	} else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
3209 		if (type == DATA) {
3210 			if (temp == WARM)
3211 				return WRITE_LIFE_LONG;
3212 			else if (temp == HOT)
3213 				return WRITE_LIFE_SHORT;
3214 			else if (temp == COLD)
3215 				return WRITE_LIFE_EXTREME;
3216 		} else if (type == NODE) {
3217 			if (temp == WARM || temp == HOT)
3218 				return WRITE_LIFE_NOT_SET;
3219 			else if (temp == COLD)
3220 				return WRITE_LIFE_NONE;
3221 		} else if (type == META) {
3222 			return WRITE_LIFE_MEDIUM;
3223 		}
3224 	}
3225 	return WRITE_LIFE_NOT_SET;
3226 }
3227 
3228 static int __get_segment_type_2(struct f2fs_io_info *fio)
3229 {
3230 	if (fio->type == DATA)
3231 		return CURSEG_HOT_DATA;
3232 	else
3233 		return CURSEG_HOT_NODE;
3234 }
3235 
3236 static int __get_segment_type_4(struct f2fs_io_info *fio)
3237 {
3238 	if (fio->type == DATA) {
3239 		struct inode *inode = fio->page->mapping->host;
3240 
3241 		if (S_ISDIR(inode->i_mode))
3242 			return CURSEG_HOT_DATA;
3243 		else
3244 			return CURSEG_COLD_DATA;
3245 	} else {
3246 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3247 			return CURSEG_WARM_NODE;
3248 		else
3249 			return CURSEG_COLD_NODE;
3250 	}
3251 }
3252 
3253 static int __get_segment_type_6(struct f2fs_io_info *fio)
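/*
 * Temperature policy for six active logs.  Cold data goes to the cold (or
 * ATGC) log, hot files and atomic/volatile writes go to the hot log, and
 * the rest follows the inode write hint.  Direct node blocks of cold files
 * are warm, other direct node blocks are hot, and indirect node blocks are
 * cold.
 */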
3254 {
3255 	if (fio->type == DATA) {
3256 		struct inode *inode = fio->page->mapping->host;
3257 
3258 		if (is_cold_data(fio->page)) {
3259 			if (fio->sbi->am.atgc_enabled)
3260 				return CURSEG_ALL_DATA_ATGC;
3261 			else
3262 				return CURSEG_COLD_DATA;
3263 		}
3264 		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3265 			return CURSEG_COLD_DATA;
3266 		if (file_is_hot(inode) ||
3267 				is_inode_flag_set(inode, FI_HOT_DATA) ||
3268 				f2fs_is_atomic_file(inode) ||
3269 				f2fs_is_volatile_file(inode))
3270 			return CURSEG_HOT_DATA;
3271 		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3272 	} else {
3273 		if (IS_DNODE(fio->page))
3274 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3275 						CURSEG_HOT_NODE;
3276 		return CURSEG_COLD_NODE;
3277 	}
3278 }
3279 
3280 static int __get_segment_type(struct f2fs_io_info *fio)
3281 {
3282 	int type = 0;
3283 
3284 	switch (F2FS_OPTION(fio->sbi).active_logs) {
3285 	case 2:
3286 		type = __get_segment_type_2(fio);
3287 		break;
3288 	case 4:
3289 		type = __get_segment_type_4(fio);
3290 		break;
3291 	case 6:
3292 		type = __get_segment_type_6(fio);
3293 		break;
3294 	default:
3295 		f2fs_bug_on(fio->sbi, true);
3296 	}
3297 
3298 	if (IS_HOT(type))
3299 		fio->temp = HOT;
3300 	else if (IS_WARM(type))
3301 		fio->temp = WARM;
3302 	else
3303 		fio->temp = COLD;
3304 	return type;
3305 }
3306 
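/*
 * Allocate the next free block of the current segment for log @type:
 * record the summary entry, update SIT and segment mtime, and open a new
 * segment (LFS, SSR or ATSSR) once the current one is full.  Takes
 * curseg_lock for read, plus curseg_mutex and sentry_lock internally.
 */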
3307 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3308 		block_t old_blkaddr, block_t *new_blkaddr,
3309 		struct f2fs_summary *sum, int type,
3310 		struct f2fs_io_info *fio)
3311 {
3312 	struct sit_info *sit_i = SIT_I(sbi);
3313 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3314 	unsigned long long old_mtime;
3315 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3316 	struct seg_entry *se = NULL;
3317 
3318 	down_read(&SM_I(sbi)->curseg_lock);
3319 
3320 	mutex_lock(&curseg->curseg_mutex);
3321 	down_write(&sit_i->sentry_lock);
3322 
3323 	if (from_gc) {
3324 		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3325 		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3326 		sanity_check_seg_type(sbi, se->type);
3327 		f2fs_bug_on(sbi, IS_NODESEG(se->type));
3328 	}
3329 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3330 
3331 	f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
3332 
3333 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3334 
3335 	/*
3336 	 * __add_sum_entry should be called under the curseg_mutex
3337 	 * because this function updates a summary entry in the
3338 	 * current summary block.
3339 	 */
3340 	__add_sum_entry(sbi, type, sum);
3341 
3342 	__refresh_next_blkoff(sbi, curseg);
3343 
3344 	stat_inc_block_count(sbi, curseg);
3345 
3346 	if (from_gc) {
3347 		old_mtime = get_segment_mtime(sbi, old_blkaddr);
3348 	} else {
3349 		update_segment_mtime(sbi, old_blkaddr, 0);
3350 		old_mtime = 0;
3351 	}
3352 	update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3353 
3354 	/*
3355 	 * SIT information should be updated before segment allocation,
3356 	 * since SSR needs the latest valid block information.
3357 	 */
3358 	update_sit_entry(sbi, *new_blkaddr, 1);
3359 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3360 		update_sit_entry(sbi, old_blkaddr, -1);
3361 
3362 	if (!__has_curseg_space(sbi, curseg)) {
3363 		if (from_gc)
3364 			get_atssr_segment(sbi, type, se->type,
3365 						AT_SSR, se->mtime);
3366 		else
3367 			sit_i->s_ops->allocate_segment(sbi, type, false);
3368 	}
3369 	/*
3370 	 * The segment dirty status should be updated after segment allocation,
3371 	 * so we only need to update the status once, after the previous
3372 	 * segment has been closed.
3373 	 */
3374 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3375 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3376 
3377 	up_write(&sit_i->sentry_lock);
3378 
3379 	if (page && IS_NODESEG(type)) {
3380 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3381 
3382 		f2fs_inode_chksum_set(sbi, page);
3383 	}
3384 
3385 	if (fio) {
3386 		struct f2fs_bio_info *io;
3387 
3388 		if (F2FS_IO_ALIGNED(sbi))
3389 			fio->retry = false;
3390 
3391 		INIT_LIST_HEAD(&fio->list);
3392 		fio->in_list = true;
3393 		io = sbi->write_io[fio->type] + fio->temp;
3394 		spin_lock(&io->io_lock);
3395 		list_add_tail(&fio->list, &io->io_list);
3396 		spin_unlock(&io->io_lock);
3397 	}
3398 
3399 	mutex_unlock(&curseg->curseg_mutex);
3400 
3401 	up_read(&SM_I(sbi)->curseg_lock);
3402 }
3403 
3404 static void update_device_state(struct f2fs_io_info *fio)
3405 {
3406 	struct f2fs_sb_info *sbi = fio->sbi;
3407 	unsigned int devidx;
3408 
3409 	if (!f2fs_is_multi_device(sbi))
3410 		return;
3411 
3412 	devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
3413 
3414 	/* update device state for fsync */
3415 	f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
3416 
3417 	/* update device state for checkpoint */
3418 	if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3419 		spin_lock(&sbi->dev_lock);
3420 		f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3421 		spin_unlock(&sbi->dev_lock);
3422 	}
3423 }
3424 
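/*
 * Allocate a new block for @fio and submit the page write.  If the
 * aligned-IO path asks for a retry, the previously allocated block is
 * invalidated and a new one is allocated before resubmitting.
 */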
3425 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3426 {
3427 	int type = __get_segment_type(fio);
3428 	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3429 
3430 	if (keep_order)
3431 		down_read(&fio->sbi->io_order_lock);
3432 reallocate:
3433 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3434 			&fio->new_blkaddr, sum, type, fio);
3435 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3436 		invalidate_mapping_pages(META_MAPPING(fio->sbi),
3437 					fio->old_blkaddr, fio->old_blkaddr);
3438 
3439 	/* writeout dirty page into bdev */
3440 	f2fs_submit_page_write(fio);
3441 	if (fio->retry) {
3442 		fio->old_blkaddr = fio->new_blkaddr;
3443 		goto reallocate;
3444 	}
3445 
3446 	update_device_state(fio);
3447 
3448 	if (keep_order)
3449 		up_read(&fio->sbi->io_order_lock);
3450 }
3451 
3452 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3453 					enum iostat_type io_type)
3454 {
3455 	struct f2fs_io_info fio = {
3456 		.sbi = sbi,
3457 		.type = META,
3458 		.temp = HOT,
3459 		.op = REQ_OP_WRITE,
3460 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3461 		.old_blkaddr = page->index,
3462 		.new_blkaddr = page->index,
3463 		.page = page,
3464 		.encrypted_page = NULL,
3465 		.in_list = false,
3466 	};
3467 
3468 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3469 		fio.op_flags &= ~REQ_META;
3470 
3471 	set_page_writeback(page);
3472 	ClearPageError(page);
3473 	f2fs_submit_page_write(&fio);
3474 
3475 	stat_inc_meta_count(sbi, page->index);
3476 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
3477 }
3478 
3479 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3480 {
3481 	struct f2fs_summary sum;
3482 
3483 	set_summary(&sum, nid, 0, 0);
3484 	do_write_page(&sum, fio);
3485 
3486 	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3487 }
3488 
3489 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3490 					struct f2fs_io_info *fio)
3491 {
3492 	struct f2fs_sb_info *sbi = fio->sbi;
3493 	struct f2fs_summary sum;
3494 
3495 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3496 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3497 	do_write_page(&sum, fio);
3498 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3499 
3500 	f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
3501 }
3502 
3503 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3504 {
3505 	int err;
3506 	struct f2fs_sb_info *sbi = fio->sbi;
3507 	unsigned int segno;
3508 
3509 	fio->new_blkaddr = fio->old_blkaddr;
3510 	/* i/o temperature is needed for passing down write hints */
3511 	__get_segment_type(fio);
3512 
3513 	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3514 
3515 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3516 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3517 		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3518 			  __func__, segno);
3519 		return -EFSCORRUPTED;
3520 	}
3521 
3522 	stat_inc_inplace_blocks(fio->sbi);
3523 
3524 	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
3525 		err = f2fs_merge_page_bio(fio);
3526 	else
3527 		err = f2fs_submit_page_bio(fio);
3528 	if (!err) {
3529 		update_device_state(fio);
3530 		f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3531 	}
3532 
3533 	return err;
3534 }
3535 
3536 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3537 						unsigned int segno)
3538 {
3539 	int i;
3540 
3541 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3542 		if (CURSEG_I(sbi, i)->segno == segno)
3543 			break;
3544 	}
3545 	return i;
3546 }
3547 
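/*
 * Rewrite the summary of a block that is being moved by GC or recovery:
 * temporarily point a data curseg at the segment holding @new_blkaddr,
 * add the summary entry there, update SIT for both addresses, and then
 * optionally restore the original curseg position.
 */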
3548 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3549 				block_t old_blkaddr, block_t new_blkaddr,
3550 				bool recover_curseg, bool recover_newaddr,
3551 				bool from_gc)
3552 {
3553 	struct sit_info *sit_i = SIT_I(sbi);
3554 	struct curseg_info *curseg;
3555 	unsigned int segno, old_cursegno;
3556 	struct seg_entry *se;
3557 	int type;
3558 	unsigned short old_blkoff;
3559 
3560 	segno = GET_SEGNO(sbi, new_blkaddr);
3561 	se = get_seg_entry(sbi, segno);
3562 	type = se->type;
3563 
3564 	down_write(&SM_I(sbi)->curseg_lock);
3565 
3566 	if (!recover_curseg) {
3567 		/* for recovery flow */
3568 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3569 			if (old_blkaddr == NULL_ADDR)
3570 				type = CURSEG_COLD_DATA;
3571 			else
3572 				type = CURSEG_WARM_DATA;
3573 		}
3574 	} else {
3575 		if (IS_CURSEG(sbi, segno)) {
3576 			/* se->type is volatile due to SSR allocation */
3577 			type = __f2fs_get_curseg(sbi, segno);
3578 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3579 		} else {
3580 			type = CURSEG_WARM_DATA;
3581 		}
3582 	}
3583 
3584 	f2fs_bug_on(sbi, !IS_DATASEG(type));
3585 	curseg = CURSEG_I(sbi, type);
3586 
3587 	mutex_lock(&curseg->curseg_mutex);
3588 	down_write(&sit_i->sentry_lock);
3589 
3590 	old_cursegno = curseg->segno;
3591 	old_blkoff = curseg->next_blkoff;
3592 
3593 	/* change the current segment */
3594 	if (segno != curseg->segno) {
3595 		curseg->next_segno = segno;
3596 		change_curseg(sbi, type, true);
3597 	}
3598 
3599 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3600 	__add_sum_entry(sbi, type, sum);
3601 
3602 	if (!recover_curseg || recover_newaddr) {
3603 		if (!from_gc)
3604 			update_segment_mtime(sbi, new_blkaddr, 0);
3605 		update_sit_entry(sbi, new_blkaddr, 1);
3606 	}
3607 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3608 		invalidate_mapping_pages(META_MAPPING(sbi),
3609 					old_blkaddr, old_blkaddr);
3610 		if (!from_gc)
3611 			update_segment_mtime(sbi, old_blkaddr, 0);
3612 		update_sit_entry(sbi, old_blkaddr, -1);
3613 	}
3614 
3615 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3616 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3617 
3618 	locate_dirty_segment(sbi, old_cursegno);
3619 
3620 	if (recover_curseg) {
3621 		if (old_cursegno != curseg->segno) {
3622 			curseg->next_segno = old_cursegno;
3623 			change_curseg(sbi, type, true);
3624 		}
3625 		curseg->next_blkoff = old_blkoff;
3626 	}
3627 
3628 	up_write(&sit_i->sentry_lock);
3629 	mutex_unlock(&curseg->curseg_mutex);
3630 	up_write(&SM_I(sbi)->curseg_lock);
3631 }
3632 
3633 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3634 				block_t old_addr, block_t new_addr,
3635 				unsigned char version, bool recover_curseg,
3636 				bool recover_newaddr)
3637 {
3638 	struct f2fs_summary sum;
3639 
3640 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3641 
3642 	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3643 					recover_curseg, recover_newaddr, false);
3644 
3645 	f2fs_update_data_blkaddr(dn, new_addr);
3646 }
3647 
3648 void f2fs_wait_on_page_writeback(struct page *page,
3649 				enum page_type type, bool ordered, bool locked)
3650 {
3651 	if (PageWriteback(page)) {
3652 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3653 
3654 		/* submit cached LFS IO */
3655 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3656 		/* sbumit cached IPU IO */
3657 		/* submit cached IPU IO */
3658 		if (ordered) {
3659 			wait_on_page_writeback(page);
3660 			f2fs_bug_on(sbi, locked && PageWriteback(page));
3661 		} else {
3662 			wait_for_stable_page(page);
3663 		}
3664 	}
3665 }
3666 
3667 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3668 {
3669 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3670 	struct page *cpage;
3671 
3672 	if (!f2fs_post_read_required(inode))
3673 		return;
3674 
3675 	if (!__is_valid_data_blkaddr(blkaddr))
3676 		return;
3677 
3678 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3679 	if (cpage) {
3680 		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3681 		f2fs_put_page(cpage, 1);
3682 	}
3683 }
3684 
3685 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3686 								block_t len)
3687 {
3688 	block_t i;
3689 
3690 	for (i = 0; i < len; i++)
3691 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
3692 }
3693 
3694 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
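/*
 * Rebuild the data cursegs from the compact summary area written under
 * CP_COMPACT_SUM_FLAG: the NAT and SIT journals come first, followed by
 * the packed summary entries of the three data logs.
 */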
3695 {
3696 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3697 	struct curseg_info *seg_i;
3698 	unsigned char *kaddr;
3699 	struct page *page;
3700 	block_t start;
3701 	int i, j, offset;
3702 
3703 	start = start_sum_block(sbi);
3704 
3705 	page = f2fs_get_meta_page(sbi, start++);
3706 	if (IS_ERR(page))
3707 		return PTR_ERR(page);
3708 	kaddr = (unsigned char *)page_address(page);
3709 
3710 	/* Step 1: restore nat cache */
3711 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3712 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3713 
3714 	/* Step 2: restore sit cache */
3715 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3716 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3717 	offset = 2 * SUM_JOURNAL_SIZE;
3718 
3719 	/* Step 3: restore summary entries */
3720 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3721 		unsigned short blk_off;
3722 		unsigned int segno;
3723 
3724 		seg_i = CURSEG_I(sbi, i);
3725 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3726 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3727 		seg_i->next_segno = segno;
3728 		reset_curseg(sbi, i, 0);
3729 		seg_i->alloc_type = ckpt->alloc_type[i];
3730 		seg_i->next_blkoff = blk_off;
3731 
3732 		if (seg_i->alloc_type == SSR)
3733 			blk_off = sbi->blocks_per_seg;
3734 
3735 		for (j = 0; j < blk_off; j++) {
3736 			struct f2fs_summary *s;
3737 			s = (struct f2fs_summary *)(kaddr + offset);
3738 			seg_i->sum_blk->entries[j] = *s;
3739 			offset += SUMMARY_SIZE;
3740 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3741 						SUM_FOOTER_SIZE)
3742 				continue;
3743 
3744 			f2fs_put_page(page, 1);
3745 			page = NULL;
3746 
3747 			page = f2fs_get_meta_page(sbi, start++);
3748 			if (IS_ERR(page))
3749 				return PTR_ERR(page);
3750 			kaddr = (unsigned char *)page_address(page);
3751 			offset = 0;
3752 		}
3753 	}
3754 	f2fs_put_page(page, 1);
3755 	return 0;
3756 }
3757 
3758 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3759 {
3760 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3761 	struct f2fs_summary_block *sum;
3762 	struct curseg_info *curseg;
3763 	struct page *new;
3764 	unsigned short blk_off;
3765 	unsigned int segno = 0;
3766 	block_t blk_addr = 0;
3767 	int err = 0;
3768 
3769 	/* get segment number and block addr */
3770 	if (IS_DATASEG(type)) {
3771 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3772 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3773 							CURSEG_HOT_DATA]);
3774 		if (__exist_node_summaries(sbi))
3775 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3776 		else
3777 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3778 	} else {
3779 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
3780 							CURSEG_HOT_NODE]);
3781 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3782 							CURSEG_HOT_NODE]);
3783 		if (__exist_node_summaries(sbi))
3784 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3785 							type - CURSEG_HOT_NODE);
3786 		else
3787 			blk_addr = GET_SUM_BLOCK(sbi, segno);
3788 	}
3789 
3790 	new = f2fs_get_meta_page(sbi, blk_addr);
3791 	if (IS_ERR(new))
3792 		return PTR_ERR(new);
3793 	sum = (struct f2fs_summary_block *)page_address(new);
3794 
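	/*
	 * For node segments only the nid of each summary entry is
	 * meaningful, so version/ofs_in_node are cleared; without packed
	 * node summaries they are rebuilt by scanning the node blocks.
	 */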
3795 	if (IS_NODESEG(type)) {
3796 		if (__exist_node_summaries(sbi)) {
3797 			struct f2fs_summary *ns = &sum->entries[0];
3798 			int i;
3799 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3800 				ns->version = 0;
3801 				ns->ofs_in_node = 0;
3802 			}
3803 		} else {
3804 			err = f2fs_restore_node_summary(sbi, segno, sum);
3805 			if (err)
3806 				goto out;
3807 		}
3808 	}
3809 
3810 	/* set the uncompleted segment as the current segment */
3811 	curseg = CURSEG_I(sbi, type);
3812 	mutex_lock(&curseg->curseg_mutex);
3813 
3814 	/* update journal info */
3815 	down_write(&curseg->journal_rwsem);
3816 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3817 	up_write(&curseg->journal_rwsem);
3818 
3819 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3820 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3821 	curseg->next_segno = segno;
3822 	reset_curseg(sbi, type, 0);
3823 	curseg->alloc_type = ckpt->alloc_type[type];
3824 	curseg->next_blkoff = blk_off;
3825 	mutex_unlock(&curseg->curseg_mutex);
3826 out:
3827 	f2fs_put_page(new, 1);
3828 	return err;
3829 }
3830 
3831 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3832 {
3833 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3834 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3835 	int type = CURSEG_HOT_DATA;
3836 	int err;
3837 
3838 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3839 		int npages = f2fs_npages_for_summary_flush(sbi, true);
3840 
3841 		if (npages >= 2)
3842 			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
3843 							META_CP, true);
3844 
3845 		/* restore for compacted data summary */
3846 		err = read_compacted_summaries(sbi);
3847 		if (err)
3848 			return err;
3849 		type = CURSEG_HOT_NODE;
3850 	}
3851 
3852 	if (__exist_node_summaries(sbi))
3853 		f2fs_ra_meta_pages(sbi,
3854 				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
3855 				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
3856 
3857 	for (; type <= CURSEG_COLD_NODE; type++) {
3858 		err = read_normal_summaries(sbi, type);
3859 		if (err)
3860 			return err;
3861 	}
3862 
3863 	/* sanity check for summary blocks */
3864 	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
3865 			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
3866 		f2fs_err(sbi, "invalid journal entries nats %u sits %u\n",
3867 			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
3868 		return -EINVAL;
3869 	}
3870 
3871 	return 0;
3872 }
3873 
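/*
 * Write the cursegs out in the compacted format; the inverse of
 * read_compacted_summaries() above.
 */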
3874 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
3875 {
3876 	struct page *page;
3877 	unsigned char *kaddr;
3878 	struct f2fs_summary *summary;
3879 	struct curseg_info *seg_i;
3880 	int written_size = 0;
3881 	int i, j;
3882 
3883 	page = f2fs_grab_meta_page(sbi, blkaddr++);
3884 	kaddr = (unsigned char *)page_address(page);
3885 	memset(kaddr, 0, PAGE_SIZE);
3886 
3887 	/* Step 1: write nat cache */
3888 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3889 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
3890 	written_size += SUM_JOURNAL_SIZE;
3891 
3892 	/* Step 2: write sit cache */
3893 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3894 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
3895 	written_size += SUM_JOURNAL_SIZE;
3896 
3897 	/* Step 3: write summary entries */
3898 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3899 		unsigned short blkoff;
3900 		seg_i = CURSEG_I(sbi, i);
3901 		if (sbi->ckpt->alloc_type[i] == SSR)
3902 			blkoff = sbi->blocks_per_seg;
3903 		else
3904 			blkoff = curseg_blkoff(sbi, i);
3905 
3906 		for (j = 0; j < blkoff; j++) {
3907 			if (!page) {
3908 				page = f2fs_grab_meta_page(sbi, blkaddr++);
3909 				kaddr = (unsigned char *)page_address(page);
3910 				memset(kaddr, 0, PAGE_SIZE);
3911 				written_size = 0;
3912 			}
3913 			summary = (struct f2fs_summary *)(kaddr + written_size);
3914 			*summary = seg_i->sum_blk->entries[j];
3915 			written_size += SUMMARY_SIZE;
3916 
3917 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
3918 							SUM_FOOTER_SIZE)
3919 				continue;
3920 
3921 			set_page_dirty(page);
3922 			f2fs_put_page(page, 1);
3923 			page = NULL;
3924 		}
3925 	}
3926 	if (page) {
3927 		set_page_dirty(page);
3928 		f2fs_put_page(page, 1);
3929 	}
3930 }
3931 
3932 static void write_normal_summaries(struct f2fs_sb_info *sbi,
3933 					block_t blkaddr, int type)
3934 {
3935 	int i, end;
3936 	if (IS_DATASEG(type))
3937 		end = type + NR_CURSEG_DATA_TYPE;
3938 	else
3939 		end = type + NR_CURSEG_NODE_TYPE;
3940 
3941 	for (i = type; i < end; i++)
3942 		write_current_sum_page(sbi, i, blkaddr + (i - type));
3943 }
3944 
3945 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3946 {
3947 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
3948 		write_compacted_summaries(sbi, start_blk);
3949 	else
3950 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
3951 }
3952 
3953 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3954 {
3955 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
3956 }
3957 
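/*
 * Look up @val in the NAT/SIT journal. Return the matching entry index;
 * if none is found and @alloc is set while cursum space remains,
 * reserve and return a new slot, otherwise return -1.
 */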
3958 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3959 					unsigned int val, int alloc)
3960 {
3961 	int i;
3962 
3963 	if (type == NAT_JOURNAL) {
3964 		for (i = 0; i < nats_in_cursum(journal); i++) {
3965 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
3966 				return i;
3967 		}
3968 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
3969 			return update_nats_in_cursum(journal, 1);
3970 	} else if (type == SIT_JOURNAL) {
3971 		for (i = 0; i < sits_in_cursum(journal); i++)
3972 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
3973 				return i;
3974 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
3975 			return update_sits_in_cursum(journal, 1);
3976 	}
3977 	return -1;
3978 }
3979 
3980 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
3981 					unsigned int segno)
3982 {
3983 	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
3984 }
3985 
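/*
 * The SIT area keeps two copies of every SIT block. Grab the alternate
 * copy for @start, seed it from the in-memory seg entries and flip the
 * bitmap so the alternate copy becomes current at the next checkpoint.
 */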
3986 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
3987 					unsigned int start)
3988 {
3989 	struct sit_info *sit_i = SIT_I(sbi);
3990 	struct page *page;
3991 	pgoff_t src_off, dst_off;
3992 
3993 	src_off = current_sit_addr(sbi, start);
3994 	dst_off = next_sit_addr(sbi, src_off);
3995 
3996 	page = f2fs_grab_meta_page(sbi, dst_off);
3997 	seg_info_to_sit_page(sbi, page, start);
3998 
3999 	set_page_dirty(page);
4000 	set_to_next_sit(sit_i, start);
4001 
4002 	return page;
4003 }
4004 
4005 static struct sit_entry_set *grab_sit_entry_set(void)
4006 {
4007 	struct sit_entry_set *ses =
4008 			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
4009 
4010 	ses->entry_cnt = 0;
4011 	INIT_LIST_HEAD(&ses->set_list);
4012 	return ses;
4013 }
4014 
4015 static void release_sit_entry_set(struct sit_entry_set *ses)
4016 {
4017 	list_del(&ses->set_list);
4018 	kmem_cache_free(sit_entry_set_slab, ses);
4019 }
4020 
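/*
 * Re-sort @ses after its entry_cnt has grown, keeping the list in
 * ascending entry_cnt order so that f2fs_flush_sit_entries() meets the
 * smallest sets first.
 */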
4021 static void adjust_sit_entry_set(struct sit_entry_set *ses,
4022 						struct list_head *head)
4023 {
4024 	struct sit_entry_set *next = ses;
4025 
4026 	if (list_is_last(&ses->set_list, head))
4027 		return;
4028 
4029 	list_for_each_entry_continue(next, head, set_list)
4030 		if (ses->entry_cnt <= next->entry_cnt)
4031 			break;
4032 
4033 	list_move_tail(&ses->set_list, &next->set_list);
4034 }
4035 
4036 static void add_sit_entry(unsigned int segno, struct list_head *head)
4037 {
4038 	struct sit_entry_set *ses;
4039 	unsigned int start_segno = START_SEGNO(segno);
4040 
4041 	list_for_each_entry(ses, head, set_list) {
4042 		if (ses->start_segno == start_segno) {
4043 			ses->entry_cnt++;
4044 			adjust_sit_entry_set(ses, head);
4045 			return;
4046 		}
4047 	}
4048 
4049 	ses = grab_sit_entry_set();
4050 
4051 	ses->start_segno = start_segno;
4052 	ses->entry_cnt++;
4053 	list_add(&ses->set_list, head);
4054 }
4055 
4056 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4057 {
4058 	struct f2fs_sm_info *sm_info = SM_I(sbi);
4059 	struct list_head *set_list = &sm_info->sit_entry_set;
4060 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4061 	unsigned int segno;
4062 
4063 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4064 		add_sit_entry(segno, set_list);
4065 }
4066 
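/*
 * Drop all SIT journal entries, making sure every referenced segment is
 * marked dirty and accounted in a sit entry set so that it is flushed
 * through SIT pages instead.
 */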
4067 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4068 {
4069 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4070 	struct f2fs_journal *journal = curseg->journal;
4071 	int i;
4072 
4073 	down_write(&curseg->journal_rwsem);
4074 	for (i = 0; i < sits_in_cursum(journal); i++) {
4075 		unsigned int segno;
4076 		bool dirtied;
4077 
4078 		segno = le32_to_cpu(segno_in_journal(journal, i));
4079 		dirtied = __mark_sit_entry_dirty(sbi, segno);
4080 
4081 		if (!dirtied)
4082 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4083 	}
4084 	update_sits_in_cursum(journal, -i);
4085 	up_write(&curseg->journal_rwsem);
4086 }
4087 
4088 /*
4089  * CP calls this function, which flushes SIT entries including sit_journal,
4090  * and moves prefree segs to free segs.
4091  */
4092 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4093 {
4094 	struct sit_info *sit_i = SIT_I(sbi);
4095 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4096 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4097 	struct f2fs_journal *journal = curseg->journal;
4098 	struct sit_entry_set *ses, *tmp;
4099 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
4100 	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4101 	struct seg_entry *se;
4102 
4103 	down_write(&sit_i->sentry_lock);
4104 
4105 	if (!sit_i->dirty_sentries)
4106 		goto out;
4107 
4108 	/*
4109 	 * temporarily add and account the sit entries from the dirty
4110 	 * bitmap in sit entry sets
4111 	 */
4112 	add_sits_in_set(sbi);
4113 
4114 	/*
4115 	 * if there is not enough space in the journal to store dirty sit
4116 	 * entries, remove all entries from the journal and add and account
4117 	 * them in sit entry sets.
4118 	 */
4119 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4120 								!to_journal)
4121 		remove_sits_in_journal(sbi);
4122 
4123 	/*
4124 	 * there are two steps to flush sit entries:
4125 	 * #1, flush sit entries to journal in current cold data summary block.
4126 	 * #2, flush sit entries to sit page.
4127 	 */
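	/*
	 * Sets are kept in ascending entry_cnt order (see
	 * adjust_sit_entry_set()), so the journal absorbs as many small
	 * sets as possible before we fall back to writing SIT pages.
	 */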
4128 	list_for_each_entry_safe(ses, tmp, head, set_list) {
4129 		struct page *page = NULL;
4130 		struct f2fs_sit_block *raw_sit = NULL;
4131 		unsigned int start_segno = ses->start_segno;
4132 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4133 						(unsigned long)MAIN_SEGS(sbi));
4134 		unsigned int segno = start_segno;
4135 
4136 		if (to_journal &&
4137 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4138 			to_journal = false;
4139 
4140 		if (to_journal) {
4141 			down_write(&curseg->journal_rwsem);
4142 		} else {
4143 			page = get_next_sit_page(sbi, start_segno);
4144 			raw_sit = page_address(page);
4145 		}
4146 
4147 		/* flush dirty sit entries in region of current sit set */
4148 		for_each_set_bit_from(segno, bitmap, end) {
4149 			int offset, sit_offset;
4150 
4151 			se = get_seg_entry(sbi, segno);
4152 #ifdef CONFIG_F2FS_CHECK_FS
4153 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4154 						SIT_VBLOCK_MAP_SIZE))
4155 				f2fs_bug_on(sbi, 1);
4156 #endif
4157 
4158 			/* add discard candidates */
4159 			if (!(cpc->reason & CP_DISCARD)) {
4160 				cpc->trim_start = segno;
4161 				add_discard_addrs(sbi, cpc, false);
4162 			}
4163 
4164 			if (to_journal) {
4165 				offset = f2fs_lookup_journal_in_cursum(journal,
4166 							SIT_JOURNAL, segno, 1);
4167 				f2fs_bug_on(sbi, offset < 0);
4168 				segno_in_journal(journal, offset) =
4169 							cpu_to_le32(segno);
4170 				seg_info_to_raw_sit(se,
4171 					&sit_in_journal(journal, offset));
4172 				check_block_count(sbi, segno,
4173 					&sit_in_journal(journal, offset));
4174 			} else {
4175 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4176 				seg_info_to_raw_sit(se,
4177 						&raw_sit->entries[sit_offset]);
4178 				check_block_count(sbi, segno,
4179 						&raw_sit->entries[sit_offset]);
4180 			}
4181 
4182 			__clear_bit(segno, bitmap);
4183 			sit_i->dirty_sentries--;
4184 			ses->entry_cnt--;
4185 		}
4186 
4187 		if (to_journal)
4188 			up_write(&curseg->journal_rwsem);
4189 		else
4190 			f2fs_put_page(page, 1);
4191 
4192 		f2fs_bug_on(sbi, ses->entry_cnt);
4193 		release_sit_entry_set(ses);
4194 	}
4195 
4196 	f2fs_bug_on(sbi, !list_empty(head));
4197 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
4198 out:
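	/*
	 * For a discard request (e.g. FITRIM), collect discard candidates
	 * across the whole trim range even if no SIT entry was dirty.
	 */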
4199 	if (cpc->reason & CP_DISCARD) {
4200 		__u64 trim_start = cpc->trim_start;
4201 
4202 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4203 			add_discard_addrs(sbi, cpc, false);
4204 
4205 		cpc->trim_start = trim_start;
4206 	}
4207 	up_write(&sit_i->sentry_lock);
4208 
4209 	set_prefree_as_free_segments(sbi);
4210 }
4211 
4212 static int build_sit_info(struct f2fs_sb_info *sbi)
4213 {
4214 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4215 	struct sit_info *sit_i;
4216 	unsigned int sit_segs, start;
4217 	char *src_bitmap, *bitmap;
4218 	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4219 
4220 	/* allocate memory for SIT information */
4221 	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4222 	if (!sit_i)
4223 		return -ENOMEM;
4224 
4225 	SM_I(sbi)->sit_info = sit_i;
4226 
4227 	sit_i->sentries =
4228 		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4229 					      MAIN_SEGS(sbi)),
4230 			      GFP_KERNEL);
4231 	if (!sit_i->sentries)
4232 		return -ENOMEM;
4233 
4234 	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4235 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4236 								GFP_KERNEL);
4237 	if (!sit_i->dirty_sentries_bitmap)
4238 		return -ENOMEM;
4239 
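	/*
	 * One contiguous allocation backs three per-segment bitmaps
	 * (cur_valid_map, ckpt_valid_map, discard_map), plus a mirror of
	 * cur_valid_map under CONFIG_F2FS_CHECK_FS, carved up below.
	 */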
4240 #ifdef CONFIG_F2FS_CHECK_FS
4241 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 4;
4242 #else
4243 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 3;
4244 #endif
4245 	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4246 	if (!sit_i->bitmap)
4247 		return -ENOMEM;
4248 
4249 	bitmap = sit_i->bitmap;
4250 
4251 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4252 		sit_i->sentries[start].cur_valid_map = bitmap;
4253 		bitmap += SIT_VBLOCK_MAP_SIZE;
4254 
4255 		sit_i->sentries[start].ckpt_valid_map = bitmap;
4256 		bitmap += SIT_VBLOCK_MAP_SIZE;
4257 
4258 #ifdef CONFIG_F2FS_CHECK_FS
4259 		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4260 		bitmap += SIT_VBLOCK_MAP_SIZE;
4261 #endif
4262 
4263 		sit_i->sentries[start].discard_map = bitmap;
4264 		bitmap += SIT_VBLOCK_MAP_SIZE;
4265 	}
4266 
4267 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4268 	if (!sit_i->tmp_map)
4269 		return -ENOMEM;
4270 
4271 	if (__is_large_section(sbi)) {
4272 		sit_i->sec_entries =
4273 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4274 						      MAIN_SECS(sbi)),
4275 				      GFP_KERNEL);
4276 		if (!sit_i->sec_entries)
4277 			return -ENOMEM;
4278 	}
4279 
4280 	/* the SIT area stores two copies of each block, so halve the count */
4281 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4282 
4283 	/* set up the SIT bitmap from the checkpoint pack */
4284 	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4285 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4286 
4287 	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4288 	if (!sit_i->sit_bitmap)
4289 		return -ENOMEM;
4290 
4291 #ifdef CONFIG_F2FS_CHECK_FS
4292 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4293 					sit_bitmap_size, GFP_KERNEL);
4294 	if (!sit_i->sit_bitmap_mir)
4295 		return -ENOMEM;
4296 
4297 	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4298 					main_bitmap_size, GFP_KERNEL);
4299 	if (!sit_i->invalid_segmap)
4300 		return -ENOMEM;
4301 #endif
4302 
4303 	/* init SIT information */
4304 	sit_i->s_ops = &default_salloc_ops;
4305 
4306 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4307 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4308 	sit_i->written_valid_blocks = 0;
4309 	sit_i->bitmap_size = sit_bitmap_size;
4310 	sit_i->dirty_sentries = 0;
4311 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4312 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4313 	sit_i->mounted_time = ktime_get_boottime_seconds();
4314 	init_rwsem(&sit_i->sentry_lock);
4315 	return 0;
4316 }
4317 
4318 static int build_free_segmap(struct f2fs_sb_info *sbi)
4319 {
4320 	struct free_segmap_info *free_i;
4321 	unsigned int bitmap_size, sec_bitmap_size;
4322 
4323 	/* allocate memory for free segmap information */
4324 	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4325 	if (!free_i)
4326 		return -ENOMEM;
4327 
4328 	SM_I(sbi)->free_info = free_i;
4329 
4330 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4331 	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4332 	if (!free_i->free_segmap)
4333 		return -ENOMEM;
4334 
4335 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4336 	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4337 	if (!free_i->free_secmap)
4338 		return -ENOMEM;
4339 
4340 	/* set all segments as dirty temporarily */
4341 	memset(free_i->free_segmap, 0xff, bitmap_size);
4342 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4343 
4344 	/* init free segmap information */
4345 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4346 	free_i->free_segments = 0;
4347 	free_i->free_sections = 0;
4348 	spin_lock_init(&free_i->segmap_lock);
4349 	return 0;
4350 }
4351 
4352 static int build_curseg(struct f2fs_sb_info *sbi)
4353 {
4354 	struct curseg_info *array;
4355 	int i;
4356 
4357 	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4358 					sizeof(*array)), GFP_KERNEL);
4359 	if (!array)
4360 		return -ENOMEM;
4361 
4362 	SM_I(sbi)->curseg_array = array;
4363 
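	/*
	 * The first NR_PERSISTENT_LOG logs map 1:1 onto the on-disk curseg
	 * types; the pinned-data and ATGC logs are in-memory only and
	 * allocate from cold data segments.
	 */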
4364 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4365 		mutex_init(&array[i].curseg_mutex);
4366 		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4367 		if (!array[i].sum_blk)
4368 			return -ENOMEM;
4369 		init_rwsem(&array[i].journal_rwsem);
4370 		array[i].journal = f2fs_kzalloc(sbi,
4371 				sizeof(struct f2fs_journal), GFP_KERNEL);
4372 		if (!array[i].journal)
4373 			return -ENOMEM;
4374 		if (i < NR_PERSISTENT_LOG)
4375 			array[i].seg_type = CURSEG_HOT_DATA + i;
4376 		else if (i == CURSEG_COLD_DATA_PINNED)
4377 			array[i].seg_type = CURSEG_COLD_DATA;
4378 		else if (i == CURSEG_ALL_DATA_ATGC)
4379 			array[i].seg_type = CURSEG_COLD_DATA;
4380 		array[i].segno = NULL_SEGNO;
4381 		array[i].next_blkoff = 0;
4382 		array[i].inited = false;
4383 	}
4384 	return restore_curseg_summaries(sbi);
4385 }
4386 
4387 static int build_sit_entries(struct f2fs_sb_info *sbi)
4388 {
4389 	struct sit_info *sit_i = SIT_I(sbi);
4390 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4391 	struct f2fs_journal *journal = curseg->journal;
4392 	struct seg_entry *se;
4393 	struct f2fs_sit_entry sit;
4394 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4395 	unsigned int i, start, end;
4396 	unsigned int readed, start_blk = 0;
4397 	int err = 0;
4398 	block_t total_node_blocks = 0;
4399 
4400 	do {
4401 		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
4402 							META_SIT, true);
4403 
4404 		start = start_blk * sit_i->sents_per_block;
4405 		end = (start_blk + readed) * sit_i->sents_per_block;
4406 
4407 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
4408 			struct f2fs_sit_block *sit_blk;
4409 			struct page *page;
4410 
4411 			se = &sit_i->sentries[start];
4412 			page = get_current_sit_page(sbi, start);
4413 			if (IS_ERR(page))
4414 				return PTR_ERR(page);
4415 			sit_blk = (struct f2fs_sit_block *)page_address(page);
4416 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4417 			f2fs_put_page(page, 1);
4418 
4419 			err = check_block_count(sbi, start, &sit);
4420 			if (err)
4421 				return err;
4422 			seg_info_from_raw_sit(se, &sit);
4423 			if (IS_NODESEG(se->type))
4424 				total_node_blocks += se->valid_blocks;
4425 
4426 			/* build discard map only one time */
4427 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4428 				memset(se->discard_map, 0xff,
4429 					SIT_VBLOCK_MAP_SIZE);
4430 			} else {
4431 				memcpy(se->discard_map,
4432 					se->cur_valid_map,
4433 					SIT_VBLOCK_MAP_SIZE);
4434 				sbi->discard_blks +=
4435 					sbi->blocks_per_seg -
4436 					se->valid_blocks;
4437 			}
4438 
4439 			if (__is_large_section(sbi))
4440 				get_sec_entry(sbi, start)->valid_blocks +=
4441 							se->valid_blocks;
4442 		}
4443 		start_blk += readed;
4444 	} while (start_blk < sit_blk_cnt);
4445 
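	/*
	 * SIT journal entries are newer than the SIT blocks read above,
	 * so apply them last to override the on-disk state.
	 */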
4446 	down_read(&curseg->journal_rwsem);
4447 	for (i = 0; i < sits_in_cursum(journal); i++) {
4448 		unsigned int old_valid_blocks;
4449 
4450 		start = le32_to_cpu(segno_in_journal(journal, i));
4451 		if (start >= MAIN_SEGS(sbi)) {
4452 			f2fs_err(sbi, "Wrong journal entry on segno %u",
4453 				 start);
4454 			err = -EFSCORRUPTED;
4455 			break;
4456 		}
4457 
4458 		se = &sit_i->sentries[start];
4459 		sit = sit_in_journal(journal, i);
4460 
4461 		old_valid_blocks = se->valid_blocks;
4462 		if (IS_NODESEG(se->type))
4463 			total_node_blocks -= old_valid_blocks;
4464 
4465 		err = check_block_count(sbi, start, &sit);
4466 		if (err)
4467 			break;
4468 		seg_info_from_raw_sit(se, &sit);
4469 		if (IS_NODESEG(se->type))
4470 			total_node_blocks += se->valid_blocks;
4471 
4472 		if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4473 			memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4474 		} else {
4475 			memcpy(se->discard_map, se->cur_valid_map,
4476 						SIT_VBLOCK_MAP_SIZE);
4477 			sbi->discard_blks += old_valid_blocks;
4478 			sbi->discard_blks -= se->valid_blocks;
4479 		}
4480 
4481 		if (__is_large_section(sbi)) {
4482 			get_sec_entry(sbi, start)->valid_blocks +=
4483 							se->valid_blocks;
4484 			get_sec_entry(sbi, start)->valid_blocks -=
4485 							old_valid_blocks;
4486 		}
4487 	}
4488 	up_read(&curseg->journal_rwsem);
4489 
4490 	if (!err && total_node_blocks != valid_node_count(sbi)) {
4491 		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4492 			 total_node_blocks, valid_node_count(sbi));
4493 		err = -EFSCORRUPTED;
4494 	}
4495 
4496 	return err;
4497 }
4498 
4499 static void init_free_segmap(struct f2fs_sb_info *sbi)
4500 {
4501 	unsigned int start;
4502 	int type;
4503 	struct seg_entry *sentry;
4504 
4505 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4506 		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4507 			continue;
4508 		sentry = get_seg_entry(sbi, start);
4509 		if (!sentry->valid_blocks)
4510 			__set_free(sbi, start);
4511 		else
4512 			SIT_I(sbi)->written_valid_blocks +=
4513 						sentry->valid_blocks;
4514 	}
4515 
4516 	/* set the current segments as in-use */
4517 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4518 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4519 		__set_test_and_inuse(sbi, curseg_t->segno);
4520 	}
4521 }
4522 
4523 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4524 {
4525 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4526 	struct free_segmap_info *free_i = FREE_I(sbi);
4527 	unsigned int segno = 0, offset = 0, secno;
4528 	block_t valid_blocks, usable_blks_in_seg;
4529 	block_t blks_per_sec = BLKS_PER_SEC(sbi);
4530 
4531 	while (1) {
4532 		/* find dirty segment based on free segmap */
4533 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4534 		if (segno >= MAIN_SEGS(sbi))
4535 			break;
4536 		offset = segno + 1;
4537 		valid_blocks = get_valid_blocks(sbi, segno, false);
4538 		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4539 		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4540 			continue;
4541 		if (valid_blocks > usable_blks_in_seg) {
4542 			f2fs_bug_on(sbi, 1);
4543 			continue;
4544 		}
4545 		mutex_lock(&dirty_i->seglist_lock);
4546 		__locate_dirty_segment(sbi, segno, DIRTY);
4547 		mutex_unlock(&dirty_i->seglist_lock);
4548 	}
4549 
4550 	if (!__is_large_section(sbi))
4551 		return;
4552 
4553 	mutex_lock(&dirty_i->seglist_lock);
4554 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4555 		valid_blocks = get_valid_blocks(sbi, segno, true);
4556 		secno = GET_SEC_FROM_SEG(sbi, segno);
4557 
4558 		if (!valid_blocks || valid_blocks == blks_per_sec)
4559 			continue;
4560 		if (IS_CURSEC(sbi, secno))
4561 			continue;
4562 		set_bit(secno, dirty_i->dirty_secmap);
4563 	}
4564 	mutex_unlock(&dirty_i->seglist_lock);
4565 }
4566 
4567 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4568 {
4569 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4570 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4571 
4572 	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4573 	if (!dirty_i->victim_secmap)
4574 		return -ENOMEM;
4575 	return 0;
4576 }
4577 
4578 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4579 {
4580 	struct dirty_seglist_info *dirty_i;
4581 	unsigned int bitmap_size, i;
4582 
4583 	/* allocate memory for dirty segments list information */
4584 	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4585 								GFP_KERNEL);
4586 	if (!dirty_i)
4587 		return -ENOMEM;
4588 
4589 	SM_I(sbi)->dirty_info = dirty_i;
4590 	mutex_init(&dirty_i->seglist_lock);
4591 
4592 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4593 
4594 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
4595 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4596 								GFP_KERNEL);
4597 		if (!dirty_i->dirty_segmap[i])
4598 			return -ENOMEM;
4599 	}
4600 
4601 	if (__is_large_section(sbi)) {
4602 		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4603 		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4604 						bitmap_size, GFP_KERNEL);
4605 		if (!dirty_i->dirty_secmap)
4606 			return -ENOMEM;
4607 	}
4608 
4609 	init_dirty_segmap(sbi);
4610 	return init_victim_secmap(sbi);
4611 }
4612 
4613 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4614 {
4615 	int i;
4616 
4617 	/*
4618 	 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4619 	 * In LFS curseg, all blkaddr after .next_blkoff should be unused.
4620 	 */
4621 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4622 		struct curseg_info *curseg = CURSEG_I(sbi, i);
4623 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4624 		unsigned int blkofs = curseg->next_blkoff;
4625 
4626 		sanity_check_seg_type(sbi, curseg->seg_type);
4627 
4628 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
4629 			goto out;
4630 
4631 		if (curseg->alloc_type == SSR)
4632 			continue;
4633 
4634 		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4635 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4636 				continue;
4637 out:
4638 			f2fs_err(sbi,
4639 				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4640 				 i, curseg->segno, curseg->alloc_type,
4641 				 curseg->next_blkoff, blkofs);
4642 			return -EFSCORRUPTED;
4643 		}
4644 	}
4645 	return 0;
4646 }
4647 
4648 #ifdef CONFIG_BLK_DEV_ZONED
4649 
4650 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4651 				    struct f2fs_dev_info *fdev,
4652 				    struct blk_zone *zone)
4653 {
4654 	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4655 	block_t zone_block, wp_block, last_valid_block;
4656 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4657 	int i, s, b, ret;
4658 	struct seg_entry *se;
4659 
4660 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4661 		return 0;
4662 
4663 	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4664 	wp_segno = GET_SEGNO(sbi, wp_block);
4665 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4666 	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4667 	zone_segno = GET_SEGNO(sbi, zone_block);
4668 	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4669 
4670 	if (zone_segno >= MAIN_SEGS(sbi))
4671 		return 0;
4672 
4673 	/*
4674 	 * Skip checking the zones that cursegs point to, since
4675 	 * fix_curseg_write_pointer() checks them.
4676 	 */
4677 	for (i = 0; i < NO_CHECK_TYPE; i++)
4678 		if (zone_secno == GET_SEC_FROM_SEG(sbi,
4679 						   CURSEG_I(sbi, i)->segno))
4680 			return 0;
4681 
4682 	/*
4683 	 * Get last valid block of the zone.
4684 	 */
4685 	last_valid_block = zone_block - 1;
4686 	for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4687 		segno = zone_segno + s;
4688 		se = get_seg_entry(sbi, segno);
4689 		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4690 			if (f2fs_test_bit(b, se->cur_valid_map)) {
4691 				last_valid_block = START_BLOCK(sbi, segno) + b;
4692 				break;
4693 			}
4694 		if (last_valid_block >= zone_block)
4695 			break;
4696 	}
4697 
4698 	/*
4699 	 * If the last valid block is beyond the write pointer, report the
4700 	 * inconsistency. This inconsistency does not cause a write error
4701 	 * because the zone will not be selected for write operations until
4702 	 * it gets discarded. Just report it.
4703 	 */
4704 	if (last_valid_block >= wp_block) {
4705 		f2fs_notice(sbi, "Valid block beyond write pointer: "
4706 			    "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
4707 			    GET_SEGNO(sbi, last_valid_block),
4708 			    GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4709 			    wp_segno, wp_blkoff);
4710 		return 0;
4711 	}
4712 
4713 	/*
4714 	 * If there is no valid block in the zone and the write pointer is
4715 	 * not at the zone start, reset the write pointer.
4716 	 */
4717 	if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
4718 		f2fs_notice(sbi,
4719 			    "Zone without valid block has non-zero write "
4720 			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4721 			    wp_segno, wp_blkoff);
4722 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4723 					zone->len >> log_sectors_per_block);
4724 		if (ret) {
4725 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4726 				 fdev->path, ret);
4727 			return ret;
4728 		}
4729 	}
4730 
4731 	return 0;
4732 }
4733 
4734 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4735 						  block_t zone_blkaddr)
4736 {
4737 	int i;
4738 
4739 	for (i = 0; i < sbi->s_ndevs; i++) {
4740 		if (!bdev_is_zoned(FDEV(i).bdev))
4741 			continue;
4742 		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4743 				zone_blkaddr <= FDEV(i).end_blk))
4744 			return &FDEV(i);
4745 	}
4746 
4747 	return NULL;
4748 }
4749 
4750 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4751 			      void *data)
{
4752 	memcpy(data, zone, sizeof(struct blk_zone));
4753 	return 0;
4754 }
4755 
4756 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4757 {
4758 	struct curseg_info *cs = CURSEG_I(sbi, type);
4759 	struct f2fs_dev_info *zbd;
4760 	struct blk_zone zone;
4761 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4762 	block_t cs_zone_block, wp_block;
4763 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4764 	sector_t zone_sector;
4765 	int err;
4766 
4767 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4768 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4769 
4770 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4771 	if (!zbd)
4772 		return 0;
4773 
4774 	/* report zone for the sector the curseg points to */
4775 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4776 		<< log_sectors_per_block;
4777 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4778 				  report_one_zone_cb, &zone);
4779 	if (err != 1) {
4780 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4781 			 zbd->path, err);
4782 		return err;
4783 	}
4784 
4785 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4786 		return 0;
4787 
4788 	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4789 	wp_segno = GET_SEGNO(sbi, wp_block);
4790 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4791 	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4792 
4793 	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4794 		wp_sector_off == 0)
4795 		return 0;
4796 
4797 	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
4798 		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
4799 		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
4800 
4801 	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
4802 		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
4803 	allocate_segment_by_default(sbi, type, true);
4804 
4805 	/* check consistency of the zone the curseg pointed to */
4806 	if (check_zone_write_pointer(sbi, zbd, &zone))
4807 		return -EIO;
4808 
4809 	/* check newly assigned zone */
4810 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4811 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4812 
4813 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4814 	if (!zbd)
4815 		return 0;
4816 
4817 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4818 		<< log_sectors_per_block;
4819 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4820 				  report_one_zone_cb, &zone);
4821 	if (err != 1) {
4822 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4823 			 zbd->path, err);
4824 		return err;
4825 	}
4826 
4827 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4828 		return 0;
4829 
4830 	if (zone.wp != zone.start) {
4831 		f2fs_notice(sbi,
4832 			    "New zone for curseg[%d] is not yet discarded. "
4833 			    "Reset the zone: curseg[0x%x,0x%x]",
4834 			    type, cs->segno, cs->next_blkoff);
4835 		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
4836 				zone_sector >> log_sectors_per_block,
4837 				zone.len >> log_sectors_per_block);
4838 		if (err) {
4839 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4840 				 zbd->path, err);
4841 			return err;
4842 		}
4843 	}
4844 
4845 	return 0;
4846 }
4847 
4848 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4849 {
4850 	int i, ret;
4851 
4852 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4853 		ret = fix_curseg_write_pointer(sbi, i);
4854 		if (ret)
4855 			return ret;
4856 	}
4857 
4858 	return 0;
4859 }
4860 
4861 struct check_zone_write_pointer_args {
4862 	struct f2fs_sb_info *sbi;
4863 	struct f2fs_dev_info *fdev;
4864 };
4865 
4866 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
4867 				      void *data)
{
4868 	struct check_zone_write_pointer_args *args;
4869 	args = (struct check_zone_write_pointer_args *)data;
4870 
4871 	return check_zone_write_pointer(args->sbi, args->fdev, zone);
4872 }
4873 
4874 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4875 {
4876 	int i, ret;
4877 	struct check_zone_write_pointer_args args;
4878 
4879 	for (i = 0; i < sbi->s_ndevs; i++) {
4880 		if (!bdev_is_zoned(FDEV(i).bdev))
4881 			continue;
4882 
4883 		args.sbi = sbi;
4884 		args.fdev = &FDEV(i);
4885 		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
4886 					  check_zone_write_pointer_cb, &args);
4887 		if (ret < 0)
4888 			return ret;
4889 	}
4890 
4891 	return 0;
4892 }
4893 
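/*
 * Return true if the zone is conventional: non-zoned devices are treated
 * as all-conventional, and blkz_seq marks the sequential zones.
 */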
4894 static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
4895 						unsigned int dev_idx)
4896 {
4897 	if (!bdev_is_zoned(FDEV(dev_idx).bdev))
4898 		return true;
4899 	return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
4900 }
4901 
4902 /* Return the zone index in the given device */
4903 static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
4904 					int dev_idx)
4905 {
4906 	block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
4907 
4908 	return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
4909 						sbi->log_blocks_per_blkz;
4910 }
4911 
4912 /*
4913  * Return the number of usable segments in a section based on the
4914  * corresponding zone's capacity. A zone is equal to a section.
4915  */
4916 static inline unsigned int f2fs_usable_zone_segs_in_sec(
4917 		struct f2fs_sb_info *sbi, unsigned int segno)
4918 {
4919 	unsigned int dev_idx, zone_idx, unusable_segs_in_sec;
4920 
4921 	dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
4922 	zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
4923 
4924 	/* Conventional zone's capacity is always equal to zone size */
4925 	if (is_conv_zone(sbi, zone_idx, dev_idx))
4926 		return sbi->segs_per_sec;
4927 
4928 	/*
4929 	 * If the zone_capacity_blocks array is NULL, then zone capacity
4930 	 * is equal to the zone size for all zones
4931 	 */
4932 	if (!FDEV(dev_idx).zone_capacity_blocks)
4933 		return sbi->segs_per_sec;
4934 
4935 	/* Get the number of segments that lie beyond the zone capacity */
4936 	unusable_segs_in_sec = (sbi->blocks_per_blkz -
4937 				FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
4938 				sbi->log_blocks_per_seg;
4939 	return sbi->segs_per_sec - unusable_segs_in_sec;
4940 }
4941 
4942 /*
4943  * Return the number of usable blocks in a segment. The number of blocks
4944  * returned is always equal to the number of blocks in a segment for
4945  * segments fully contained within a sequential zone capacity or a
4946  * conventional zone. For segments partially contained in a sequential
4947  * zone capacity, the number of usable blocks up to the zone capacity
4948  * is returned. 0 is returned in all other cases.
4949  */
4950 static inline unsigned int f2fs_usable_zone_blks_in_seg(
4951 			struct f2fs_sb_info *sbi, unsigned int segno)
4952 {
4953 	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
4954 	unsigned int zone_idx, dev_idx, secno;
4955 
4956 	secno = GET_SEC_FROM_SEG(sbi, segno);
4957 	seg_start = START_BLOCK(sbi, segno);
4958 	dev_idx = f2fs_target_device_index(sbi, seg_start);
4959 	zone_idx = get_zone_idx(sbi, secno, dev_idx);
4960 
4961 	/*
4962 	 * Conventional zone's capacity is always equal to zone size,
4963 	 * so, blocks per segment is unchanged.
4964 	 */
4965 	if (is_conv_zone(sbi, zone_idx, dev_idx))
4966 		return sbi->blocks_per_seg;
4967 
4968 	if (!FDEV(dev_idx).zone_capacity_blocks)
4969 		return sbi->blocks_per_seg;
4970 
4971 	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
4972 	sec_cap_blkaddr = sec_start_blkaddr +
4973 				FDEV(dev_idx).zone_capacity_blocks[zone_idx];
4974 
4975 	/*
4976 	 * If segment starts before zone capacity and spans beyond
4977 	 * zone capacity, then usable blocks are from seg start to
4978 	 * zone capacity. If the segment starts after the zone capacity,
4979 	 * then there are no usable blocks.
4980 	 */
4981 	if (seg_start >= sec_cap_blkaddr)
4982 		return 0;
4983 	if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
4984 		return sec_cap_blkaddr - seg_start;
4985 
4986 	return sbi->blocks_per_seg;
4987 }
4988 #else
4989 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4990 {
4991 	return 0;
4992 }
4993 
4994 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4995 {
4996 	return 0;
4997 }
4998 
4999 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5000 							unsigned int segno)
5001 {
5002 	return 0;
5003 }
5004 
5005 static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
5006 							unsigned int segno)
5007 {
5008 	return 0;
5009 }
5010 #endif
5011 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5012 					unsigned int segno)
5013 {
5014 	if (f2fs_sb_has_blkzoned(sbi))
5015 		return f2fs_usable_zone_blks_in_seg(sbi, segno);
5016 
5017 	return sbi->blocks_per_seg;
5018 }
5019 
5020 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5021 					unsigned int segno)
5022 {
5023 	if (f2fs_sb_has_blkzoned(sbi))
5024 		return f2fs_usable_zone_segs_in_sec(sbi, segno);
5025 
5026 	return sbi->segs_per_sec;
5027 }
5028 
5029 /*
5030  * Update min, max modified time for cost-benefit GC algorithm
5031  */
5032 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5033 {
5034 	struct sit_info *sit_i = SIT_I(sbi);
5035 	unsigned int segno;
5036 
5037 	down_write(&sit_i->sentry_lock);
5038 
5039 	sit_i->min_mtime = ULLONG_MAX;
5040 
5041 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
5042 		unsigned int i;
5043 		unsigned long long mtime = 0;
5044 
5045 		for (i = 0; i < sbi->segs_per_sec; i++)
5046 			mtime += get_seg_entry(sbi, segno + i)->mtime;
5047 
5048 		mtime = div_u64(mtime, sbi->segs_per_sec);
5049 
5050 		if (sit_i->min_mtime > mtime)
5051 			sit_i->min_mtime = mtime;
5052 	}
5053 	sit_i->max_mtime = get_mtime(sbi, false);
5054 	sit_i->dirty_max_mtime = 0;
5055 	up_write(&sit_i->sentry_lock);
5056 }
5057 
5058 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5059 {
5060 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5061 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5062 	struct f2fs_sm_info *sm_info;
5063 	int err;
5064 
5065 	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5066 	if (!sm_info)
5067 		return -ENOMEM;
5068 
5069 	/* init sm info */
5070 	sbi->sm_info = sm_info;
5071 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5072 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5073 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5074 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5075 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5076 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5077 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
5078 	sm_info->rec_prefree_segments = sm_info->main_segments *
5079 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
5080 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
5081 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
5082 
5083 	if (!f2fs_lfs_mode(sbi))
5084 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
5085 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5086 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5087 	sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
5088 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5089 	sm_info->min_ssr_sections = reserved_sections(sbi);
5090 
5091 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
5092 
5093 	init_rwsem(&sm_info->curseg_lock);
5094 
5095 	if (!f2fs_readonly(sbi->sb)) {
5096 		err = f2fs_create_flush_cmd_control(sbi);
5097 		if (err)
5098 			return err;
5099 	}
5100 
5101 	err = create_discard_cmd_control(sbi);
5102 	if (err)
5103 		return err;
5104 
5105 	err = build_sit_info(sbi);
5106 	if (err)
5107 		return err;
5108 	err = build_free_segmap(sbi);
5109 	if (err)
5110 		return err;
5111 	err = build_curseg(sbi);
5112 	if (err)
5113 		return err;
5114 
5115 	/* reinit free segmap based on SIT */
5116 	err = build_sit_entries(sbi);
5117 	if (err)
5118 		return err;
5119 
5120 	init_free_segmap(sbi);
5121 	err = build_dirty_segmap(sbi);
5122 	if (err)
5123 		return err;
5124 
5125 	err = sanity_check_curseg(sbi);
5126 	if (err)
5127 		return err;
5128 
5129 	init_min_max_mtime(sbi);
5130 	return 0;
5131 }
5132 
5133 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5134 		enum dirty_type dirty_type)
5135 {
5136 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5137 
5138 	mutex_lock(&dirty_i->seglist_lock);
5139 	kvfree(dirty_i->dirty_segmap[dirty_type]);
5140 	dirty_i->nr_dirty[dirty_type] = 0;
5141 	mutex_unlock(&dirty_i->seglist_lock);
5142 }
5143 
5144 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5145 {
5146 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5147 	kvfree(dirty_i->victim_secmap);
5148 }
5149 
5150 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5151 {
5152 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5153 	int i;
5154 
5155 	if (!dirty_i)
5156 		return;
5157 
5158 	/* discard pre-free/dirty segments list */
5159 	for (i = 0; i < NR_DIRTY_TYPE; i++)
5160 		discard_dirty_segmap(sbi, i);
5161 
5162 	if (__is_large_section(sbi)) {
5163 		mutex_lock(&dirty_i->seglist_lock);
5164 		kvfree(dirty_i->dirty_secmap);
5165 		mutex_unlock(&dirty_i->seglist_lock);
5166 	}
5167 
5168 	destroy_victim_secmap(sbi);
5169 	SM_I(sbi)->dirty_info = NULL;
5170 	kfree(dirty_i);
5171 }
5172 
5173 static void destroy_curseg(struct f2fs_sb_info *sbi)
5174 {
5175 	struct curseg_info *array = SM_I(sbi)->curseg_array;
5176 	int i;
5177 
5178 	if (!array)
5179 		return;
5180 	SM_I(sbi)->curseg_array = NULL;
5181 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
5182 		kfree(array[i].sum_blk);
5183 		kfree(array[i].journal);
5184 	}
5185 	kfree(array);
5186 }
5187 
5188 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5189 {
5190 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5191 	if (!free_i)
5192 		return;
5193 	SM_I(sbi)->free_info = NULL;
5194 	kvfree(free_i->free_segmap);
5195 	kvfree(free_i->free_secmap);
5196 	kfree(free_i);
5197 }
5198 
5199 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5200 {
5201 	struct sit_info *sit_i = SIT_I(sbi);
5202 
5203 	if (!sit_i)
5204 		return;
5205 
5206 	if (sit_i->sentries)
5207 		kvfree(sit_i->bitmap);
5208 	kfree(sit_i->tmp_map);
5209 
5210 	kvfree(sit_i->sentries);
5211 	kvfree(sit_i->sec_entries);
5212 	kvfree(sit_i->dirty_sentries_bitmap);
5213 
5214 	SM_I(sbi)->sit_info = NULL;
5215 	kvfree(sit_i->sit_bitmap);
5216 #ifdef CONFIG_F2FS_CHECK_FS
5217 	kvfree(sit_i->sit_bitmap_mir);
5218 	kvfree(sit_i->invalid_segmap);
5219 #endif
5220 	kfree(sit_i);
5221 }
5222 
5223 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5224 {
5225 	struct f2fs_sm_info *sm_info = SM_I(sbi);
5226 
5227 	if (!sm_info)
5228 		return;
5229 	f2fs_destroy_flush_cmd_control(sbi, true);
5230 	destroy_discard_cmd_control(sbi);
5231 	destroy_dirty_segmap(sbi);
5232 	destroy_curseg(sbi);
5233 	destroy_free_segmap(sbi);
5234 	destroy_sit_info(sbi);
5235 	sbi->sm_info = NULL;
5236 	kfree(sm_info);
5237 }
5238 
5239 int __init f2fs_create_segment_manager_caches(void)
5240 {
5241 	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5242 			sizeof(struct discard_entry));
5243 	if (!discard_entry_slab)
5244 		goto fail;
5245 
5246 	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5247 			sizeof(struct discard_cmd));
5248 	if (!discard_cmd_slab)
5249 		goto destroy_discard_entry;
5250 
5251 	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5252 			sizeof(struct sit_entry_set));
5253 	if (!sit_entry_set_slab)
5254 		goto destroy_discard_cmd;
5255 
5256 	inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
5257 			sizeof(struct inmem_pages));
5258 	if (!inmem_entry_slab)
5259 		goto destroy_sit_entry_set;
5260 	return 0;
5261 
5262 destroy_sit_entry_set:
5263 	kmem_cache_destroy(sit_entry_set_slab);
5264 destroy_discard_cmd:
5265 	kmem_cache_destroy(discard_cmd_slab);
5266 destroy_discard_entry:
5267 	kmem_cache_destroy(discard_entry_slab);
5268 fail:
5269 	return -ENOMEM;
5270 }
5271 
5272 void f2fs_destroy_segment_manager_caches(void)
5273 {
5274 	kmem_cache_destroy(sit_entry_set_slab);
5275 	kmem_cache_destroy(discard_cmd_slab);
5276 	kmem_cache_destroy(discard_entry_slab);
5277 	kmem_cache_destroy(inmem_entry_slab);
5278 }
5279