xref: /linux/fs/f2fs/segment.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/segment.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/sched/mm.h>
13 #include <linux/prefetch.h>
14 #include <linux/kthread.h>
15 #include <linux/swap.h>
16 #include <linux/timer.h>
17 #include <linux/freezer.h>
18 #include <linux/sched/signal.h>
19 #include <linux/random.h>
20 
21 #include "f2fs.h"
22 #include "segment.h"
23 #include "node.h"
24 #include "gc.h"
25 #include "iostat.h"
26 #include <trace/events/f2fs.h>
27 
28 #define __reverse_ffz(x) __reverse_ffs(~(x))
29 
30 static struct kmem_cache *discard_entry_slab;
31 static struct kmem_cache *discard_cmd_slab;
32 static struct kmem_cache *sit_entry_set_slab;
33 static struct kmem_cache *revoke_entry_slab;
34 
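/*
 * Load BITS_PER_LONG / 8 bytes from @str, most significant byte first,
 * e.g. on 64-bit, {0x80, 0, 0, 0, 0, 0, 0, 0} becomes 0x8000000000000000UL,
 * so that f2fs's reversed on-disk bitmaps can be scanned a word at a time.
 */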
35 static unsigned long __reverse_ulong(unsigned char *str)
36 {
37 	unsigned long tmp = 0;
38 	int shift = 24, idx = 0;
39 
40 #if BITS_PER_LONG == 64
41 	shift = 56;
42 #endif
43 	while (shift >= 0) {
44 		tmp |= (unsigned long)str[idx++] << shift;
45 		shift -= BITS_PER_BYTE;
46 	}
47 	return tmp;
48 }
49 
50 /*
51  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
52  * MSB and LSB are reversed in a byte by f2fs_set_bit.
53  */
54 static inline unsigned long __reverse_ffs(unsigned long word)
55 {
56 	int num = 0;
57 
58 #if BITS_PER_LONG == 64
59 	if ((word & 0xffffffff00000000UL) == 0)
60 		num += 32;
61 	else
62 		word >>= 32;
63 #endif
64 	if ((word & 0xffff0000) == 0)
65 		num += 16;
66 	else
67 		word >>= 16;
68 
69 	if ((word & 0xff00) == 0)
70 		num += 8;
71 	else
72 		word >>= 8;
73 
74 	if ((word & 0xf0) == 0)
75 		num += 4;
76 	else
77 		word >>= 4;
78 
79 	if ((word & 0xc) == 0)
80 		num += 2;
81 	else
82 		word >>= 2;
83 
84 	if ((word & 0x2) == 0)
85 		num += 1;
86 	return num;
87 }
88 
89 /*
90  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
91  * f2fs_set_bit makes MSB and LSB reversed in a byte.
92  * @size must be an integral multiple of BITS_PER_LONG.
93  * Example:
94  *                             MSB <--> LSB
95  *   f2fs_set_bit(0, bitmap) => 1000 0000
96  *   f2fs_set_bit(7, bitmap) => 0000 0001
97  */
98 static unsigned long __find_rev_next_bit(const unsigned long *addr,
99 			unsigned long size, unsigned long offset)
100 {
101 	const unsigned long *p = addr + BIT_WORD(offset);
102 	unsigned long result = size;
103 	unsigned long tmp;
104 
105 	if (offset >= size)
106 		return size;
107 
108 	size -= (offset & ~(BITS_PER_LONG - 1));
109 	offset %= BITS_PER_LONG;
110 
111 	while (1) {
112 		if (*p == 0)
113 			goto pass;
114 
115 		tmp = __reverse_ulong((unsigned char *)p);
116 
117 		tmp &= ~0UL >> offset;
118 		if (size < BITS_PER_LONG)
119 			tmp &= (~0UL << (BITS_PER_LONG - size));
120 		if (tmp)
121 			goto found;
122 pass:
123 		if (size <= BITS_PER_LONG)
124 			break;
125 		size -= BITS_PER_LONG;
126 		offset = 0;
127 		p++;
128 	}
129 	return result;
130 found:
131 	return result - size + __reverse_ffs(tmp);
132 }
133 
134 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
135 			unsigned long size, unsigned long offset)
136 {
137 	const unsigned long *p = addr + BIT_WORD(offset);
138 	unsigned long result = size;
139 	unsigned long tmp;
140 
141 	if (offset >= size)
142 		return size;
143 
144 	size -= (offset & ~(BITS_PER_LONG - 1));
145 	offset %= BITS_PER_LONG;
146 
147 	while (1) {
148 		if (*p == ~0UL)
149 			goto pass;
150 
151 		tmp = __reverse_ulong((unsigned char *)p);
152 
153 		if (offset)
154 			tmp |= ~0UL << (BITS_PER_LONG - offset);
155 		if (size < BITS_PER_LONG)
156 			tmp |= ~0UL >> size;
157 		if (tmp != ~0UL)
158 			goto found;
159 pass:
160 		if (size <= BITS_PER_LONG)
161 			break;
162 		size -= BITS_PER_LONG;
163 		offset = 0;
164 		p++;
165 	}
166 	return result;
167 found:
168 	return result - size + __reverse_ffz(tmp);
169 }
170 
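/*
 * Decide whether block allocation may fall back to SSR: never in LFS mode,
 * always under urgent-high GC or while checkpointing is disabled, and
 * otherwise only when free sections cannot cover the dirty node/dentry/imeta
 * footprint plus the min_ssr and reserved section thresholds.
 */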
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
172 {
173 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
174 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
175 	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
176 
177 	if (f2fs_lfs_mode(sbi))
178 		return false;
179 	if (sbi->gc_mode == GC_URGENT_HIGH)
180 		return true;
181 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
182 		return true;
183 
184 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
185 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
186 }
187 
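/*
 * Tear down the atomic write context of @inode: release the in-flight write
 * count, clear the atomic flags, resync the dirty state, and, if @clean,
 * also drop the page cache and restore the original i_size.
 */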
188 void f2fs_abort_atomic_write(struct inode *inode, bool clean)
189 {
190 	struct f2fs_inode_info *fi = F2FS_I(inode);
191 
192 	if (!f2fs_is_atomic_file(inode))
193 		return;
194 
195 	if (clean)
196 		truncate_inode_pages_final(inode->i_mapping);
197 
198 	release_atomic_write_cnt(inode);
199 	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
200 	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
201 	clear_inode_flag(inode, FI_ATOMIC_FILE);
202 	if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
203 		clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
204 		/*
205 		 * The vfs inode keeps clean during commit, but the f2fs inode
206 		 * doesn't. So clear the dirty state after commit and let
207 		 * f2fs_mark_inode_dirty_sync ensure a consistent dirty state.
208 		 */
209 		f2fs_inode_synced(inode);
210 		f2fs_mark_inode_dirty_sync(inode, true);
211 	}
212 	stat_dec_atomic_inode(inode);
213 
214 	F2FS_I(inode)->atomic_write_task = NULL;
215 
216 	if (clean) {
217 		f2fs_i_size_write(inode, fi->original_i_size);
218 		fi->original_i_size = 0;
219 	}
220 	/* avoid stale dirty inode during eviction */
221 	sync_inode_metadata(inode, 0);
222 }
223 
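/*
 * Swap the block of @inode at @index with @new_addr.  On commit (!@recover)
 * the replaced address is returned through @old_addr so it can be revoked
 * later; on recovery (@recover) the previously saved address is restored.
 */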
224 static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
225 			block_t new_addr, block_t *old_addr, bool recover)
226 {
227 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
228 	struct dnode_of_data dn;
229 	struct node_info ni;
230 	int err;
231 
232 retry:
233 	set_new_dnode(&dn, inode, NULL, NULL, 0);
234 	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
235 	if (err) {
236 		if (err == -ENOMEM) {
237 			memalloc_retry_wait(GFP_NOFS);
238 			goto retry;
239 		}
240 		return err;
241 	}
242 
243 	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
244 	if (err) {
245 		f2fs_put_dnode(&dn);
246 		return err;
247 	}
248 
249 	if (recover) {
250 		/* dn.data_blkaddr is always valid */
251 		if (!__is_valid_data_blkaddr(new_addr)) {
252 			if (new_addr == NULL_ADDR)
253 				dec_valid_block_count(sbi, inode, 1);
254 			f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1);
255 			f2fs_update_data_blkaddr(&dn, new_addr);
256 		} else {
257 			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
258 				new_addr, ni.version, true, true);
259 		}
260 	} else {
261 		blkcnt_t count = 1;
262 
263 		err = inc_valid_block_count(sbi, inode, &count, true);
264 		if (err) {
265 			f2fs_put_dnode(&dn);
266 			return err;
267 		}
268 
269 		*old_addr = dn.data_blkaddr;
270 		f2fs_truncate_data_blocks_range(&dn, 1);
271 		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
272 
273 		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
274 					ni.version, true, false);
275 	}
276 
277 	f2fs_put_dnode(&dn);
278 
279 	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
280 			index, old_addr ? *old_addr : 0, new_addr, recover);
281 	return 0;
282 }
283 
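/*
 * Drain the revoke list built by the commit path: either roll every entry
 * back to its old address (@revoke), or, for FI_ATOMIC_REPLACE inodes,
 * truncate the ranges that no entry covers.
 */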
284 static void __complete_revoke_list(struct inode *inode, struct list_head *head,
285 					bool revoke)
286 {
287 	struct revoke_entry *cur, *tmp;
288 	pgoff_t start_index = 0;
289 	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);
290 
291 	list_for_each_entry_safe(cur, tmp, head, list) {
292 		if (revoke) {
293 			__replace_atomic_write_block(inode, cur->index,
294 						cur->old_addr, NULL, true);
295 		} else if (truncate) {
296 			f2fs_truncate_hole(inode, start_index, cur->index);
297 			start_index = cur->index + 1;
298 		}
299 
300 		list_del(&cur->list);
301 		kmem_cache_free(revoke_entry_slab, cur);
302 	}
303 
304 	if (!revoke && truncate)
305 		f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
306 }
307 
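/*
 * Move every valid block of the cow inode into the original inode while
 * recording the replaced addresses in a revoke list, so that a failure
 * midway can be undone by __complete_revoke_list().
 */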
308 static int __f2fs_commit_atomic_write(struct inode *inode)
309 {
310 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
311 	struct f2fs_inode_info *fi = F2FS_I(inode);
312 	struct inode *cow_inode = fi->cow_inode;
313 	struct revoke_entry *new;
314 	struct list_head revoke_list;
315 	block_t blkaddr;
316 	struct dnode_of_data dn;
317 	pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
318 	pgoff_t off = 0, blen, index;
319 	int ret = 0, i;
320 
321 	INIT_LIST_HEAD(&revoke_list);
322 
323 	while (len) {
324 		blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);
325 
326 		set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
327 		ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
328 		if (ret && ret != -ENOENT) {
329 			goto out;
330 		} else if (ret == -ENOENT) {
331 			ret = 0;
332 			if (dn.max_level == 0)
333 				goto out;
334 			goto next;
335 		}
336 
337 		blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, cow_inode),
338 				len);
339 		index = off;
340 		for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
341 			blkaddr = f2fs_data_blkaddr(&dn);
342 
343 			if (!__is_valid_data_blkaddr(blkaddr)) {
344 				continue;
345 			} else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
346 					DATA_GENERIC_ENHANCE)) {
347 				f2fs_put_dnode(&dn);
348 				ret = -EFSCORRUPTED;
349 				goto out;
350 			}
351 
352 			new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
353 							true, NULL);
354 
355 			ret = __replace_atomic_write_block(inode, index, blkaddr,
356 							&new->old_addr, false);
357 			if (ret) {
358 				f2fs_put_dnode(&dn);
359 				kmem_cache_free(revoke_entry_slab, new);
360 				goto out;
361 			}
362 
363 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
364 			new->index = index;
365 			list_add_tail(&new->list, &revoke_list);
366 		}
367 		f2fs_put_dnode(&dn);
368 next:
369 		off += blen;
370 		len -= blen;
371 	}
372 
373 out:
374 	if (time_to_inject(sbi, FAULT_ATOMIC_TIMEOUT))
375 		f2fs_schedule_timeout_killable(DEFAULT_FAULT_TIMEOUT, true);
376 
377 	if (ret) {
378 		sbi->revoked_atomic_block += fi->atomic_write_cnt;
379 	} else {
380 		sbi->committed_atomic_block += fi->atomic_write_cnt;
381 		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
382 
383 		/*
384 		 * the inode may have no FI_ATOMIC_DIRTIED flag if there was
385 		 * no write before commit.
386 		 */
387 		if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
388 			/* clear atomic dirty status and set vfs dirty status */
389 			clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
390 			f2fs_mark_inode_dirty_sync(inode, true);
391 		}
392 	}
393 
394 	__complete_revoke_list(inode, &revoke_list, ret ? true : false);
395 
396 	return ret;
397 }
398 
399 int f2fs_commit_atomic_write(struct inode *inode)
400 {
401 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
402 	struct f2fs_inode_info *fi = F2FS_I(inode);
403 	struct f2fs_lock_context lc;
404 	int err;
405 
406 	err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
407 	if (err)
408 		return err;
409 
410 	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
411 	f2fs_lock_op(sbi, &lc);
412 
413 	err = __f2fs_commit_atomic_write(inode);
414 
415 	f2fs_unlock_op(sbi, &lc);
416 	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
417 
418 	return err;
419 }
420 
421 /*
422  * This function balances dirty node and dentry pages.
423  * In addition, it controls garbage collection.
424  */
425 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
426 {
427 	if (f2fs_cp_error(sbi))
428 		return;
429 
430 	if (time_to_inject(sbi, FAULT_CHECKPOINT))
431 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);
432 
433 	/* balance_fs_bg can be left pending */
434 	if (need && excess_cached_nats(sbi))
435 		f2fs_balance_fs_bg(sbi, false);
436 
437 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
438 		return;
439 
440 	/*
441 	 * If there are too many dirty dir/node pages without enough free
442 	 * segments, we should do GC or end up with a checkpoint.
443 	 */
444 	if (has_enough_free_secs(sbi, 0, 0))
445 		return;
446 
447 	if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
448 				sbi->gc_thread->f2fs_gc_task) {
449 		DEFINE_WAIT(wait);
450 
451 		prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
452 					TASK_UNINTERRUPTIBLE);
453 		wake_up(&sbi->gc_thread->gc_wait_queue_head);
454 		io_schedule();
455 		finish_wait(&sbi->gc_thread->fggc_wq, &wait);
456 	} else {
457 		struct f2fs_gc_control gc_control = {
458 			.victim_segno = NULL_SEGNO,
459 			.init_gc_type = f2fs_sb_has_blkzoned(sbi) ?
460 				FG_GC : BG_GC,
461 			.no_bg_gc = true,
462 			.should_migrate_blocks = false,
463 			.err_gc_skipped = false,
464 			.nr_free_secs = 1 };
465 		f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
466 		stat_inc_gc_call_count(sbi, FOREGROUND);
467 		f2fs_gc(sbi, &gc_control);
468 	}
469 }
470 
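/*
 * Background-sync trigger: true once any single dirty page class
 * (dents/qdata/nodes/meta/imeta) crosses the per-type threshold, or their
 * sum crosses 1.5x of it.  The threshold is raised (factor 3 instead of 2)
 * while cp_rwsem is held.
 */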
471 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
472 {
473 	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
474 	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
475 	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
476 	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
477 	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
478 	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
479 	unsigned int threshold =
480 		SEGS_TO_BLKS(sbi, (factor * DEFAULT_DIRTY_THRESHOLD));
481 	unsigned int global_threshold = threshold * 3 / 2;
482 
483 	if (dents >= threshold || qdata >= threshold ||
484 		nodes >= threshold || meta >= threshold ||
485 		imeta >= threshold)
486 		return true;
487 	return dents + qdata + nodes + meta + imeta > global_threshold;
488 }
489 
490 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
491 {
492 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
493 		return;
494 
495 	/* try to shrink the read extent cache when there is not enough memory */
496 	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
497 		f2fs_shrink_read_extent_tree(sbi,
498 				READ_EXTENT_CACHE_SHRINK_NUMBER);
499 
500 	/* try to shrink the age extent cache when there is not enough memory */
501 	if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
502 		f2fs_shrink_age_extent_tree(sbi,
503 				AGE_EXTENT_CACHE_SHRINK_NUMBER);
504 
505 	/* check the # of cached NAT entries */
506 	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
507 		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
508 
509 	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
510 		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
511 	else
512 		f2fs_build_free_nids(sbi, false, false);
513 
514 	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
515 		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
516 		goto do_sync;
517 
518 	/* there is in-flight background IO, or a recent foreground operation */
519 	if (is_inflight_io(sbi, REQ_TIME) ||
520 		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
521 		return;
522 
523 	/* the periodic checkpoint timeout threshold was exceeded */
524 	if (f2fs_time_over(sbi, CP_TIME))
525 		goto do_sync;
526 
527 	/* checkpoint is the only way to shrink partial cached entries */
528 	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
529 		f2fs_available_free_memory(sbi, INO_ENTRIES))
530 		return;
531 
532 do_sync:
533 	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
534 		struct blk_plug plug;
535 
536 		mutex_lock(&sbi->flush_lock);
537 
538 		blk_start_plug(&plug);
539 		f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
540 		blk_finish_plug(&plug);
541 
542 		mutex_unlock(&sbi->flush_lock);
543 	}
544 	stat_inc_cp_call_count(sbi, BACKGROUND);
545 	f2fs_sync_fs(sbi->sb, 1);
546 }
547 
548 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
549 				struct block_device *bdev)
550 {
551 	int ret = blkdev_issue_flush(bdev);
552 
553 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
554 				test_opt(sbi, FLUSH_MERGE), ret);
555 	if (!ret)
556 		f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
557 	return ret;
558 }
559 
560 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
561 {
562 	int ret = 0;
563 	int i;
564 
565 	if (!f2fs_is_multi_device(sbi))
566 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
567 
568 	for (i = 0; i < sbi->s_ndevs; i++) {
569 		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
570 			continue;
571 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
572 		if (ret)
573 			break;
574 	}
575 	return ret;
576 }
577 
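/*
 * Flush-merge worker: drain all queued requests from issue_list, issue a
 * single device flush on behalf of the whole batch, then propagate the
 * result to every waiter.
 */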
578 static int issue_flush_thread(void *data)
579 {
580 	struct f2fs_sb_info *sbi = data;
581 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
582 	wait_queue_head_t *q = &fcc->flush_wait_queue;
583 repeat:
584 	if (kthread_should_stop())
585 		return 0;
586 
587 	if (!llist_empty(&fcc->issue_list)) {
588 		struct flush_cmd *cmd, *next;
589 		int ret;
590 
591 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
592 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
593 
594 		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
595 
596 		ret = submit_flush_wait(sbi, cmd->ino);
597 		atomic_inc(&fcc->issued_flush);
598 
599 		llist_for_each_entry_safe(cmd, next,
600 					  fcc->dispatch_list, llnode) {
601 			cmd->ret = ret;
602 			complete(&cmd->wait);
603 		}
604 		fcc->dispatch_list = NULL;
605 	}
606 
607 	wait_event_interruptible(*q,
608 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
609 	goto repeat;
610 }
611 
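/*
 * Issue a cache flush for @ino.  With FLUSH_MERGE, concurrent callers are
 * funnelled through the flush thread so that several requests share one
 * device flush; if the thread has gone away, a caller drains the queue by
 * itself.
 */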
612 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
613 {
614 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
615 	struct flush_cmd cmd;
616 	int ret;
617 
618 	if (test_opt(sbi, NOBARRIER))
619 		return 0;
620 
621 	if (!test_opt(sbi, FLUSH_MERGE)) {
622 		atomic_inc(&fcc->queued_flush);
623 		ret = submit_flush_wait(sbi, ino);
624 		atomic_dec(&fcc->queued_flush);
625 		atomic_inc(&fcc->issued_flush);
626 		return ret;
627 	}
628 
629 	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
630 	    f2fs_is_multi_device(sbi)) {
631 		ret = submit_flush_wait(sbi, ino);
632 		atomic_dec(&fcc->queued_flush);
633 
634 		atomic_inc(&fcc->issued_flush);
635 		return ret;
636 	}
637 
638 	cmd.ino = ino;
639 	init_completion(&cmd.wait);
640 
641 	llist_add(&cmd.llnode, &fcc->issue_list);
642 
643 	/*
644 	 * update issue_list before we wake up the issue_flush thread; this
645 	 * smp_mb() pairs with another barrier in ___wait_event().  See the
646 	 * comments of waitqueue_active() for more details.
647 	 */
648 	smp_mb();
649 
650 	if (waitqueue_active(&fcc->flush_wait_queue))
651 		wake_up(&fcc->flush_wait_queue);
652 
653 	if (fcc->f2fs_issue_flush) {
654 		wait_for_completion(&cmd.wait);
655 		atomic_dec(&fcc->queued_flush);
656 	} else {
657 		struct llist_node *list;
658 
659 		list = llist_del_all(&fcc->issue_list);
660 		if (!list) {
661 			wait_for_completion(&cmd.wait);
662 			atomic_dec(&fcc->queued_flush);
663 		} else {
664 			struct flush_cmd *tmp, *next;
665 
666 			ret = submit_flush_wait(sbi, ino);
667 
668 			llist_for_each_entry_safe(tmp, next, list, llnode) {
669 				if (tmp == &cmd) {
670 					cmd.ret = ret;
671 					atomic_dec(&fcc->queued_flush);
672 					continue;
673 				}
674 				tmp->ret = ret;
675 				complete(&tmp->wait);
676 			}
677 		}
678 	}
679 
680 	return cmd.ret;
681 }
682 
683 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
684 {
685 	dev_t dev = sbi->sb->s_bdev->bd_dev;
686 	struct flush_cmd_control *fcc;
687 
688 	if (SM_I(sbi)->fcc_info) {
689 		fcc = SM_I(sbi)->fcc_info;
690 		if (fcc->f2fs_issue_flush)
691 			return 0;
692 		goto init_thread;
693 	}
694 
695 	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
696 	if (!fcc)
697 		return -ENOMEM;
698 	atomic_set(&fcc->issued_flush, 0);
699 	atomic_set(&fcc->queued_flush, 0);
700 	init_waitqueue_head(&fcc->flush_wait_queue);
701 	init_llist_head(&fcc->issue_list);
702 	SM_I(sbi)->fcc_info = fcc;
703 	if (!test_opt(sbi, FLUSH_MERGE))
704 		return 0;
705 
706 init_thread:
707 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
708 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
709 	if (IS_ERR(fcc->f2fs_issue_flush)) {
710 		int err = PTR_ERR(fcc->f2fs_issue_flush);
711 
712 		fcc->f2fs_issue_flush = NULL;
713 		return err;
714 	}
715 
716 	return 0;
717 }
718 
719 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
720 {
721 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
722 
723 	if (fcc && fcc->f2fs_issue_flush) {
724 		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
725 
726 		fcc->f2fs_issue_flush = NULL;
727 		kthread_stop(flush_thread);
728 	}
729 	if (free) {
730 		kfree(fcc);
731 		SM_I(sbi)->fcc_info = NULL;
732 	}
733 }
734 
735 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
736 {
737 	int ret = 0, i;
738 
739 	if (!f2fs_is_multi_device(sbi))
740 		return 0;
741 
742 	if (test_opt(sbi, NOBARRIER))
743 		return 0;
744 
745 	for (i = 1; i < sbi->s_ndevs; i++) {
746 		int count = DEFAULT_RETRY_IO_COUNT;
747 
748 		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
749 			continue;
750 
751 		do {
752 			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
753 			if (ret)
754 				f2fs_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
755 		} while (ret && --count);
756 
757 		if (ret) {
758 			f2fs_stop_checkpoint(sbi, false,
759 					STOP_CP_REASON_FLUSH_FAIL);
760 			break;
761 		}
762 
763 		spin_lock(&sbi->dev_lock);
764 		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
765 		spin_unlock(&sbi->dev_lock);
766 	}
767 
768 	return ret;
769 }
770 
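/*
 * Mark @segno in the @dirty_type segmap and bump the nr_dirty counter;
 * for DIRTY, also track the per-type map and, on large sections, the
 * section-granular dirty_secmap.  Current segments are never added.
 */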
771 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
772 		enum dirty_type dirty_type)
773 {
774 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
775 
776 	/* need not be added */
777 	if (is_curseg(sbi, segno))
778 		return;
779 
780 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
781 		dirty_i->nr_dirty[dirty_type]++;
782 
783 	if (dirty_type == DIRTY) {
784 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
785 		enum dirty_type t = sentry->type;
786 
787 		if (unlikely(t >= DIRTY)) {
788 			f2fs_bug_on(sbi, 1);
789 			return;
790 		}
791 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
792 			dirty_i->nr_dirty[t]++;
793 
794 		if (__is_large_section(sbi)) {
795 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
796 			block_t valid_blocks =
797 				get_valid_blocks(sbi, segno, true);
798 
799 			f2fs_bug_on(sbi,
800 				(!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
801 				!valid_blocks) ||
802 				valid_blocks == CAP_BLKS_PER_SEC(sbi));
803 
804 			if (!is_cursec(sbi, secno))
805 				set_bit(secno, dirty_i->dirty_secmap);
806 		}
807 	}
808 }
809 
810 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
811 		enum dirty_type dirty_type)
812 {
813 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
814 	block_t valid_blocks;
815 
816 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
817 		dirty_i->nr_dirty[dirty_type]--;
818 
819 	if (dirty_type == DIRTY) {
820 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
821 		enum dirty_type t = sentry->type;
822 
823 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
824 			dirty_i->nr_dirty[t]--;
825 
826 		valid_blocks = get_valid_blocks(sbi, segno, true);
827 		if (valid_blocks == 0) {
828 			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
829 						dirty_i->victim_secmap);
830 #ifdef CONFIG_F2FS_CHECK_FS
831 			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
832 #endif
833 		}
834 		if (__is_large_section(sbi)) {
835 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
836 
837 			if (!valid_blocks ||
838 					valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
839 				clear_bit(secno, dirty_i->dirty_secmap);
840 				return;
841 			}
842 
843 			if (!is_cursec(sbi, secno))
844 				set_bit(secno, dirty_i->dirty_secmap);
845 		}
846 	}
847 }
848 
849 /*
850  * Errors such as -ENOMEM should not occur here, since adding a dirty
851  * entry into the seglist is not a critical operation.
852  * If a given segment is one of the current working segments, it won't be added.
853  */
854 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
855 {
856 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
857 	unsigned short valid_blocks, ckpt_valid_blocks;
858 	unsigned int usable_blocks;
859 
860 	if (segno == NULL_SEGNO || is_curseg(sbi, segno))
861 		return;
862 
863 	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
864 	mutex_lock(&dirty_i->seglist_lock);
865 
866 	valid_blocks = get_valid_blocks(sbi, segno, false);
867 	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
868 
869 	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
870 		ckpt_valid_blocks == usable_blocks)) {
871 		__locate_dirty_segment(sbi, segno, PRE);
872 		__remove_dirty_segment(sbi, segno, DIRTY);
873 	} else if (valid_blocks < usable_blocks) {
874 		__locate_dirty_segment(sbi, segno, DIRTY);
875 	} else {
876 		/* Recovery routine with SSR needs this */
877 		__remove_dirty_segment(sbi, segno, DIRTY);
878 	}
879 
880 	mutex_unlock(&dirty_i->seglist_lock);
881 }
882 
883 /* This moves currently empty dirty segments to prefree; it takes seglist_lock itself. */
884 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
885 {
886 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
887 	unsigned int segno;
888 
889 	mutex_lock(&dirty_i->seglist_lock);
890 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
891 		if (get_valid_blocks(sbi, segno, false))
892 			continue;
893 		if (is_curseg(sbi, segno))
894 			continue;
895 		__locate_dirty_segment(sbi, segno, PRE);
896 		__remove_dirty_segment(sbi, segno, DIRTY);
897 	}
898 	mutex_unlock(&dirty_i->seglist_lock);
899 }
900 
901 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
902 {
903 	int ovp_hole_segs =
904 		(overprovision_segments(sbi) - reserved_segments(sbi));
905 	block_t ovp_holes = SEGS_TO_BLKS(sbi, ovp_hole_segs);
906 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
907 	block_t holes[2] = {0, 0};	/* DATA and NODE */
908 	block_t unusable;
909 	struct seg_entry *se;
910 	unsigned int segno;
911 
912 	mutex_lock(&dirty_i->seglist_lock);
913 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
914 		se = get_seg_entry(sbi, segno);
915 		if (IS_NODESEG(se->type))
916 			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
917 							se->valid_blocks;
918 		else
919 			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
920 							se->valid_blocks;
921 	}
922 	mutex_unlock(&dirty_i->seglist_lock);
923 
924 	unusable = max(holes[DATA], holes[NODE]);
925 	if (unusable > ovp_holes)
926 		return unusable - ovp_holes;
927 	return 0;
928 }
929 
930 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
931 {
932 	int ovp_hole_segs =
933 		(overprovision_segments(sbi) - reserved_segments(sbi));
934 
935 	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
936 		return 0;
937 	if (unusable > F2FS_OPTION(sbi).unusable_cap)
938 		return -EAGAIN;
939 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
940 		dirty_segments(sbi) > ovp_hole_segs)
941 		return -EAGAIN;
942 	if (has_not_enough_free_secs(sbi, 0, 0))
943 		return -EAGAIN;
944 	return 0;
945 }
946 
947 /* This is only used by SBI_CP_DISABLED */
948 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
949 {
950 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
951 	unsigned int segno = 0;
952 
953 	mutex_lock(&dirty_i->seglist_lock);
954 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
955 		if (get_valid_blocks(sbi, segno, false))
956 			continue;
957 		if (get_ckpt_valid_blocks(sbi, segno, false))
958 			continue;
959 		mutex_unlock(&dirty_i->seglist_lock);
960 		return segno;
961 	}
962 	mutex_unlock(&dirty_i->seglist_lock);
963 	return NULL_SEGNO;
964 }
965 
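/*
 * Allocate a discard command for @len blocks at logical address @lstart
 * (physical @start on @bdev), queue it on the pending list chosen by
 * plist_idx(@len) and account it as undiscarded.  Callers hold
 * dcc->cmd_lock.
 */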
966 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
967 		struct block_device *bdev, block_t lstart,
968 		block_t start, block_t len)
969 {
970 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
971 	struct list_head *pend_list;
972 	struct discard_cmd *dc;
973 
974 	f2fs_bug_on(sbi, !len);
975 
976 	pend_list = &dcc->pend_list[plist_idx(len)];
977 
978 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
979 	INIT_LIST_HEAD(&dc->list);
980 	dc->bdev = bdev;
981 	dc->di.lstart = lstart;
982 	dc->di.start = start;
983 	dc->di.len = len;
984 	dc->ref = 0;
985 	dc->state = D_PREP;
986 	dc->queued = 0;
987 	dc->error = 0;
988 	init_completion(&dc->wait);
989 	list_add_tail(&dc->list, pend_list);
990 	spin_lock_init(&dc->lock);
991 	dc->bio_ref = 0;
992 	atomic_inc(&dcc->discard_cmd_cnt);
993 	dcc->undiscard_blks += len;
994 
995 	return dc;
996 }
997 
998 static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
999 {
1000 #ifdef CONFIG_F2FS_CHECK_FS
1001 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1002 	struct rb_node *cur = rb_first_cached(&dcc->root), *next;
1003 	struct discard_cmd *cur_dc, *next_dc;
1004 
1005 	while (cur) {
1006 		next = rb_next(cur);
1007 		if (!next)
1008 			return true;
1009 
1010 		cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
1011 		next_dc = rb_entry(next, struct discard_cmd, rb_node);
1012 
1013 		if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
1014 			f2fs_info(sbi, "broken discard_rbtree, "
1015 				"cur(%u, %u) next(%u, %u)",
1016 				cur_dc->di.lstart, cur_dc->di.len,
1017 				next_dc->di.lstart, next_dc->di.len);
1018 			return false;
1019 		}
1020 		cur = next;
1021 	}
1022 #endif
1023 	return true;
1024 }
1025 
1026 static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
1027 						block_t blkaddr)
1028 {
1029 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1030 	struct rb_node *node = dcc->root.rb_root.rb_node;
1031 	struct discard_cmd *dc;
1032 
1033 	while (node) {
1034 		dc = rb_entry(node, struct discard_cmd, rb_node);
1035 
1036 		if (blkaddr < dc->di.lstart)
1037 			node = node->rb_left;
1038 		else if (blkaddr >= dc->di.lstart + dc->di.len)
1039 			node = node->rb_right;
1040 		else
1041 			return dc;
1042 	}
1043 	return NULL;
1044 }
1045 
1046 static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
1047 				block_t blkaddr,
1048 				struct discard_cmd **prev_entry,
1049 				struct discard_cmd **next_entry,
1050 				struct rb_node ***insert_p,
1051 				struct rb_node **insert_parent)
1052 {
1053 	struct rb_node **pnode = &root->rb_root.rb_node;
1054 	struct rb_node *parent = NULL, *tmp_node;
1055 	struct discard_cmd *dc;
1056 
1057 	*insert_p = NULL;
1058 	*insert_parent = NULL;
1059 	*prev_entry = NULL;
1060 	*next_entry = NULL;
1061 
1062 	if (RB_EMPTY_ROOT(&root->rb_root))
1063 		return NULL;
1064 
1065 	while (*pnode) {
1066 		parent = *pnode;
1067 		dc = rb_entry(*pnode, struct discard_cmd, rb_node);
1068 
1069 		if (blkaddr < dc->di.lstart)
1070 			pnode = &(*pnode)->rb_left;
1071 		else if (blkaddr >= dc->di.lstart + dc->di.len)
1072 			pnode = &(*pnode)->rb_right;
1073 		else
1074 			goto lookup_neighbors;
1075 	}
1076 
1077 	*insert_p = pnode;
1078 	*insert_parent = parent;
1079 
1080 	dc = rb_entry(parent, struct discard_cmd, rb_node);
1081 	tmp_node = parent;
1082 	if (parent && blkaddr > dc->di.lstart)
1083 		tmp_node = rb_next(parent);
1084 	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1085 
1086 	tmp_node = parent;
1087 	if (parent && blkaddr < dc->di.lstart)
1088 		tmp_node = rb_prev(parent);
1089 	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1090 	return NULL;
1091 
1092 lookup_neighbors:
1093 	/* lookup prev node for merging backward later */
1094 	tmp_node = rb_prev(&dc->rb_node);
1095 	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1096 
1097 	/* lookup next node for merging frontward later */
1098 	tmp_node = rb_next(&dc->rb_node);
1099 	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1100 	return dc;
1101 }
1102 
1103 static void __detach_discard_cmd(struct discard_cmd_control *dcc,
1104 							struct discard_cmd *dc)
1105 {
1106 	if (dc->state == D_DONE)
1107 		atomic_sub(dc->queued, &dcc->queued_discard);
1108 
1109 	list_del(&dc->list);
1110 	rb_erase_cached(&dc->rb_node, &dcc->root);
1111 	dcc->undiscard_blks -= dc->di.len;
1112 
1113 	kmem_cache_free(discard_cmd_slab, dc);
1114 
1115 	atomic_dec(&dcc->discard_cmd_cnt);
1116 }
1117 
1118 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1119 							struct discard_cmd *dc)
1120 {
1121 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1122 	unsigned long flags;
1123 
1124 	trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);
1125 
1126 	spin_lock_irqsave(&dc->lock, flags);
1127 	if (dc->bio_ref) {
1128 		spin_unlock_irqrestore(&dc->lock, flags);
1129 		return;
1130 	}
1131 	spin_unlock_irqrestore(&dc->lock, flags);
1132 
1133 	f2fs_bug_on(sbi, dc->ref);
1134 
1135 	if (dc->error == -EOPNOTSUPP)
1136 		dc->error = 0;
1137 
1138 	if (dc->error)
1139 		f2fs_info_ratelimited(sbi,
1140 			"Issue discard(%u, %u, %u) failed, ret: %d",
1141 			dc->di.lstart, dc->di.start, dc->di.len, dc->error);
1142 	__detach_discard_cmd(dcc, dc);
1143 }
1144 
1145 static void f2fs_submit_discard_endio(struct bio *bio)
1146 {
1147 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
1148 	unsigned long flags;
1149 
1150 	spin_lock_irqsave(&dc->lock, flags);
1151 	if (!dc->error)
1152 		dc->error = blk_status_to_errno(bio->bi_status);
1153 	dc->bio_ref--;
1154 	if (!dc->bio_ref && dc->state == D_SUBMIT) {
1155 		dc->state = D_DONE;
1156 		complete_all(&dc->wait);
1157 	}
1158 	spin_unlock_irqrestore(&dc->lock, flags);
1159 	bio_put(bio);
1160 }
1161 
1162 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1163 				block_t start, block_t end)
1164 {
1165 #ifdef CONFIG_F2FS_CHECK_FS
1166 	struct seg_entry *sentry;
1167 	unsigned int segno;
1168 	block_t blk = start;
1169 	unsigned long offset, size, *map;
1170 
1171 	while (blk < end) {
1172 		segno = GET_SEGNO(sbi, blk);
1173 		sentry = get_seg_entry(sbi, segno);
1174 		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1175 
1176 		if (end < START_BLOCK(sbi, segno + 1))
1177 			size = GET_BLKOFF_FROM_SEG0(sbi, end);
1178 		else
1179 			size = BLKS_PER_SEG(sbi);
1180 		map = (unsigned long *)(sentry->cur_valid_map);
1181 		offset = __find_rev_next_bit(map, size, offset);
1182 		f2fs_bug_on(sbi, offset != size);
1183 		blk = START_BLOCK(sbi, segno + 1);
1184 	}
1185 #endif
1186 }
1187 
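/*
 * Fill @dpolicy for @discard_type: background discard is async, ordered and
 * optionally io-aware (and turns urgent when utilization is high); force and
 * fstrim ignore io-awareness; umount issues everything at minimum
 * granularity under a timeout to keep CP_TRIMMED_FLAG.
 */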
1188 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1189 				struct discard_policy *dpolicy,
1190 				int discard_type, unsigned int granularity)
1191 {
1192 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1193 
1194 	/* common policy */
1195 	dpolicy->type = discard_type;
1196 	dpolicy->sync = true;
1197 	dpolicy->ordered = false;
1198 	dpolicy->granularity = granularity;
1199 
1200 	dpolicy->max_requests = dcc->max_discard_request;
1201 	dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
1202 	dpolicy->timeout = false;
1203 
1204 	if (discard_type == DPOLICY_BG) {
1205 		dpolicy->min_interval = dcc->min_discard_issue_time;
1206 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1207 		dpolicy->max_interval = dcc->max_discard_issue_time;
1208 		if (dcc->discard_io_aware == DPOLICY_IO_AWARE_ENABLE)
1209 			dpolicy->io_aware = true;
1210 		else if (dcc->discard_io_aware == DPOLICY_IO_AWARE_DISABLE)
1211 			dpolicy->io_aware = false;
1212 		dpolicy->sync = false;
1213 		dpolicy->ordered = true;
1214 		if (utilization(sbi) > dcc->discard_urgent_util) {
1215 			dpolicy->granularity = MIN_DISCARD_GRANULARITY;
1216 			if (atomic_read(&dcc->discard_cmd_cnt))
1217 				dpolicy->max_interval =
1218 					dcc->min_discard_issue_time;
1219 		}
1220 	} else if (discard_type == DPOLICY_FORCE) {
1221 		dpolicy->min_interval = dcc->min_discard_issue_time;
1222 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1223 		dpolicy->max_interval = dcc->max_discard_issue_time;
1224 		dpolicy->io_aware = false;
1225 	} else if (discard_type == DPOLICY_FSTRIM) {
1226 		dpolicy->io_aware = false;
1227 	} else if (discard_type == DPOLICY_UMOUNT) {
1228 		dpolicy->io_aware = false;
1229 		/* we need to issue all to keep CP_TRIMMED_FLAG */
1230 		dpolicy->granularity = MIN_DISCARD_GRANULARITY;
1231 		dpolicy->timeout = true;
1232 	}
1233 }
1234 
1235 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1236 				struct block_device *bdev, block_t lstart,
1237 				block_t start, block_t len);
1238 
1239 #ifdef CONFIG_BLK_DEV_ZONED
1240 static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
1241 				   struct discard_cmd *dc, blk_opf_t flag,
1242 				   struct list_head *wait_list,
1243 				   unsigned int *issued)
1244 {
1245 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1246 	struct block_device *bdev = dc->bdev;
1247 	struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
1248 	unsigned long flags;
1249 
1250 	trace_f2fs_issue_reset_zone(bdev, dc->di.start);
1251 
1252 	spin_lock_irqsave(&dc->lock, flags);
1253 	dc->state = D_SUBMIT;
1254 	dc->bio_ref++;
1255 	spin_unlock_irqrestore(&dc->lock, flags);
1256 
1257 	if (issued)
1258 		(*issued)++;
1259 
1260 	atomic_inc(&dcc->queued_discard);
1261 	dc->queued++;
1262 	list_move_tail(&dc->list, wait_list);
1263 
1264 	/* sanity check on discard range */
1265 	__check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);
1266 
1267 	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
1268 	bio->bi_private = dc;
1269 	bio->bi_end_io = f2fs_submit_discard_endio;
1270 	submit_bio(bio);
1271 
1272 	atomic_inc(&dcc->issued_discard);
1273 	f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
1274 }
1275 #endif
1276 
1277 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
1278 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1279 				struct discard_policy *dpolicy,
1280 				struct discard_cmd *dc, int *issued)
1281 {
1282 	struct block_device *bdev = dc->bdev;
1283 	unsigned int max_discard_blocks =
1284 			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1285 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1286 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1287 					&(dcc->fstrim_list) : &(dcc->wait_list);
1288 	blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
1289 	block_t lstart, start, len, total_len;
1290 
1291 	if (dc->state != D_PREP)
1292 		return 0;
1293 
1294 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1295 		return 0;
1296 
1297 #ifdef CONFIG_BLK_DEV_ZONED
1298 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
1299 		int devi = f2fs_bdev_index(sbi, bdev);
1300 
1301 		if (devi < 0)
1302 			return -EINVAL;
1303 
1304 		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1305 			__submit_zone_reset_cmd(sbi, dc, flag,
1306 						wait_list, issued);
1307 			return 0;
1308 		}
1309 	}
1310 #endif
1311 
1312 	/*
1313 	 * stop issuing discard for any of the cases below:
1314 	 * 1. the device is a conventional zone, but it doesn't support discard.
1315 	 * 2. the device is a regular device that no longer supports discard
1316 	 * after a snapshot.
1317 	 */
1318 	if (!bdev_max_discard_sectors(bdev))
1319 		return -EOPNOTSUPP;
1320 
1321 	trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);
1322 
1323 	lstart = dc->di.lstart;
1324 	start = dc->di.start;
1325 	len = dc->di.len;
1326 	total_len = len;
1327 
1328 	dc->di.len = 0;
1329 
1330 	while (total_len && *issued < dpolicy->max_requests) {
1331 		struct bio *bio = NULL;
1332 		unsigned long flags;
1333 		bool last = true;
1334 
1335 		if (len > max_discard_blocks) {
1336 			len = max_discard_blocks;
1337 			last = false;
1338 		}
1339 
1340 		(*issued)++;
1341 		if (*issued == dpolicy->max_requests)
1342 			last = true;
1343 
1344 		dc->di.len += len;
1345 
1346 		__blkdev_issue_discard(bdev, SECTOR_FROM_BLOCK(start),
1347 				SECTOR_FROM_BLOCK(len), GFP_NOFS, &bio);
1348 		f2fs_bug_on(sbi, !bio);
1349 
1350 		/*
1351 		 * the state must be set before submission, to avoid
1352 		 * hitting D_DONE right away
1353 		 */
1354 		spin_lock_irqsave(&dc->lock, flags);
1355 		if (last)
1356 			dc->state = D_SUBMIT;
1357 		else
1358 			dc->state = D_PARTIAL;
1359 		dc->bio_ref++;
1360 		spin_unlock_irqrestore(&dc->lock, flags);
1361 
1362 		atomic_inc(&dcc->queued_discard);
1363 		dc->queued++;
1364 		list_move_tail(&dc->list, wait_list);
1365 
1366 		/* sanity check on discard range */
1367 		__check_sit_bitmap(sbi, lstart, lstart + len);
1368 
1369 		bio->bi_private = dc;
1370 		bio->bi_end_io = f2fs_submit_discard_endio;
1371 		bio->bi_opf |= flag;
1372 		submit_bio(bio);
1373 
1374 		atomic_inc(&dcc->issued_discard);
1375 
1376 		f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);
1377 
1378 		lstart += len;
1379 		start += len;
1380 		total_len -= len;
1381 		len = total_len;
1382 	}
1383 
1384 	if (len) {
1385 		dcc->undiscard_blks -= len;
1386 		__update_discard_tree_range(sbi, bdev, lstart, start, len);
1387 	}
1388 	return 0;
1389 }
1390 
1391 static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
1392 				struct block_device *bdev, block_t lstart,
1393 				block_t start, block_t len)
1394 {
1395 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1396 	struct rb_node **p = &dcc->root.rb_root.rb_node;
1397 	struct rb_node *parent = NULL;
1398 	struct discard_cmd *dc;
1399 	bool leftmost = true;
1400 
1401 	/* look up rb tree to find parent node */
1402 	while (*p) {
1403 		parent = *p;
1404 		dc = rb_entry(parent, struct discard_cmd, rb_node);
1405 
1406 		if (lstart < dc->di.lstart) {
1407 			p = &(*p)->rb_left;
1408 		} else if (lstart >= dc->di.lstart + dc->di.len) {
1409 			p = &(*p)->rb_right;
1410 			leftmost = false;
1411 		} else {
1412 			/* skip adding it if it already exists */
1413 			return;
1414 		}
1415 	}
1416 
1417 	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1418 
1419 	rb_link_node(&dc->rb_node, parent, p);
1420 	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
1421 }
1422 
1423 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1424 						struct discard_cmd *dc)
1425 {
1426 	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
1427 }
1428 
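/*
 * Carve @blkaddr out of a pending discard command: keep the part in front
 * of it in place and, if the punch splits the range, insert a new command
 * for the tail.  Completed or single-block commands are simply removed.
 */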
1429 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1430 				struct discard_cmd *dc, block_t blkaddr)
1431 {
1432 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1433 	struct discard_info di = dc->di;
1434 	bool modified = false;
1435 
1436 	if (dc->state == D_DONE || dc->di.len == 1) {
1437 		__remove_discard_cmd(sbi, dc);
1438 		return;
1439 	}
1440 
1441 	dcc->undiscard_blks -= di.len;
1442 
1443 	if (blkaddr > di.lstart) {
1444 		dc->di.len = blkaddr - dc->di.lstart;
1445 		dcc->undiscard_blks += dc->di.len;
1446 		__relocate_discard_cmd(dcc, dc);
1447 		modified = true;
1448 	}
1449 
1450 	if (blkaddr < di.lstart + di.len - 1) {
1451 		if (modified) {
1452 			__insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
1453 					di.start + blkaddr + 1 - di.lstart,
1454 					di.lstart + di.len - 1 - blkaddr);
1455 		} else {
1456 			dc->di.lstart++;
1457 			dc->di.len--;
1458 			dc->di.start++;
1459 			dcc->undiscard_blks += dc->di.len;
1460 			__relocate_discard_cmd(dcc, dc);
1461 		}
1462 	}
1463 }
1464 
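/*
 * Merge the range [@lstart, @lstart + @len) into the discard rbtree:
 * extend mergeable D_PREP neighbours on the same @bdev, bounded by the
 * device discard limit, and insert fresh commands for any gaps.
 */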
1465 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1466 				struct block_device *bdev, block_t lstart,
1467 				block_t start, block_t len)
1468 {
1469 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1470 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1471 	struct discard_cmd *dc;
1472 	struct discard_info di = {0};
1473 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1474 	unsigned int max_discard_blocks =
1475 			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1476 	block_t end = lstart + len;
1477 
1478 	dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
1479 				&prev_dc, &next_dc, &insert_p, &insert_parent);
1480 	if (dc)
1481 		prev_dc = dc;
1482 
1483 	if (!prev_dc) {
1484 		di.lstart = lstart;
1485 		di.len = next_dc ? next_dc->di.lstart - lstart : len;
1486 		di.len = min(di.len, len);
1487 		di.start = start;
1488 	}
1489 
1490 	while (1) {
1491 		struct rb_node *node;
1492 		bool merged = false;
1493 		struct discard_cmd *tdc = NULL;
1494 
1495 		if (prev_dc) {
1496 			di.lstart = prev_dc->di.lstart + prev_dc->di.len;
1497 			if (di.lstart < lstart)
1498 				di.lstart = lstart;
1499 			if (di.lstart >= end)
1500 				break;
1501 
1502 			if (!next_dc || next_dc->di.lstart > end)
1503 				di.len = end - di.lstart;
1504 			else
1505 				di.len = next_dc->di.lstart - di.lstart;
1506 			di.start = start + di.lstart - lstart;
1507 		}
1508 
1509 		if (!di.len)
1510 			goto next;
1511 
1512 		if (prev_dc && prev_dc->state == D_PREP &&
1513 			prev_dc->bdev == bdev &&
1514 			__is_discard_back_mergeable(&di, &prev_dc->di,
1515 							max_discard_blocks)) {
1516 			prev_dc->di.len += di.len;
1517 			dcc->undiscard_blks += di.len;
1518 			__relocate_discard_cmd(dcc, prev_dc);
1519 			di = prev_dc->di;
1520 			tdc = prev_dc;
1521 			merged = true;
1522 		}
1523 
1524 		if (next_dc && next_dc->state == D_PREP &&
1525 			next_dc->bdev == bdev &&
1526 			__is_discard_front_mergeable(&di, &next_dc->di,
1527 							max_discard_blocks)) {
1528 			next_dc->di.lstart = di.lstart;
1529 			next_dc->di.len += di.len;
1530 			next_dc->di.start = di.start;
1531 			dcc->undiscard_blks += di.len;
1532 			__relocate_discard_cmd(dcc, next_dc);
1533 			if (tdc)
1534 				__remove_discard_cmd(sbi, tdc);
1535 			merged = true;
1536 		}
1537 
1538 		if (!merged)
1539 			__insert_discard_cmd(sbi, bdev,
1540 						di.lstart, di.start, di.len);
1541 next:
1542 		prev_dc = next_dc;
1543 		if (!prev_dc)
1544 			break;
1545 
1546 		node = rb_next(&prev_dc->rb_node);
1547 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1548 	}
1549 }
1550 
1551 #ifdef CONFIG_BLK_DEV_ZONED
1552 static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
1553 		struct block_device *bdev, block_t blkstart, block_t lblkstart,
1554 		block_t blklen)
1555 {
1556 	trace_f2fs_queue_reset_zone(bdev, blkstart);
1557 
1558 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1559 	__insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
1560 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1561 }
1562 #endif
1563 
1564 static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
1565 		struct block_device *bdev, block_t blkstart, block_t blklen)
1566 {
1567 	block_t lblkstart = blkstart;
1568 
1569 	if (!f2fs_bdev_support_discard(bdev))
1570 		return;
1571 
1572 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
1573 
1574 	if (f2fs_is_multi_device(sbi)) {
1575 		int devi = f2fs_target_device_index(sbi, blkstart);
1576 
1577 		blkstart -= FDEV(devi).start_blk;
1578 	}
1579 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1580 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1581 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1582 }
1583 
1584 static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1585 		struct discard_policy *dpolicy, int *issued)
1586 {
1587 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1588 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1589 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1590 	struct discard_cmd *dc;
1591 	struct blk_plug plug;
1592 	bool io_interrupted = false;
1593 
1594 	mutex_lock(&dcc->cmd_lock);
1595 	dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
1596 				&prev_dc, &next_dc, &insert_p, &insert_parent);
1597 	if (!dc)
1598 		dc = next_dc;
1599 
1600 	blk_start_plug(&plug);
1601 
1602 	while (dc) {
1603 		struct rb_node *node;
1604 		int err = 0;
1605 
1606 		if (dc->state != D_PREP)
1607 			goto next;
1608 
1609 		if (*issued > 0 && unlikely(freezing(current)))
1610 			break;
1611 
1612 		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1613 			io_interrupted = true;
1614 			break;
1615 		}
1616 
1617 		dcc->next_pos = dc->di.lstart + dc->di.len;
1618 		err = __submit_discard_cmd(sbi, dpolicy, dc, issued);
1619 
1620 		if (*issued >= dpolicy->max_requests)
1621 			break;
1622 next:
1623 		node = rb_next(&dc->rb_node);
1624 		if (err)
1625 			__remove_discard_cmd(sbi, dc);
1626 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1627 	}
1628 
1629 	blk_finish_plug(&plug);
1630 
1631 	if (!dc)
1632 		dcc->next_pos = 0;
1633 
1634 	mutex_unlock(&dcc->cmd_lock);
1635 
1636 	if (!(*issued) && io_interrupted)
1637 		*issued = -1;
1638 }
1639 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1640 					struct discard_policy *dpolicy);
1641 
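/*
 * Walk the pending lists from the largest to the smallest granularity and
 * submit commands according to @dpolicy.  Returns the number of requests
 * issued, or -1 when nothing was issued due to io-awareness back-off.
 */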
1642 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1643 					struct discard_policy *dpolicy)
1644 {
1645 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1646 	struct list_head *pend_list;
1647 	struct discard_cmd *dc, *tmp;
1648 	struct blk_plug plug;
1649 	int i, issued;
1650 	bool io_interrupted = false;
1651 	bool suspended = false;
1652 
1653 	if (dpolicy->timeout)
1654 		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1655 
1656 retry:
1657 	issued = 0;
1658 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1659 		if (dpolicy->timeout &&
1660 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1661 			break;
1662 
1663 		if (i + 1 < dpolicy->granularity)
1664 			break;
1665 
1666 		if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) {
1667 			__issue_discard_cmd_orderly(sbi, dpolicy, &issued);
1668 			return issued;
1669 		}
1670 
1671 		pend_list = &dcc->pend_list[i];
1672 
1673 		mutex_lock(&dcc->cmd_lock);
1674 		if (list_empty(pend_list))
1675 			goto next;
1676 		if (unlikely(dcc->rbtree_check))
1677 			f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
1678 		blk_start_plug(&plug);
1679 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1680 			f2fs_bug_on(sbi, dc->state != D_PREP);
1681 
1682 			if (issued > 0 && unlikely(freezing(current))) {
1683 				suspended = true;
1684 				break;
1685 			}
1686 
1687 			if (dpolicy->timeout &&
1688 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1689 				break;
1690 
1691 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1692 						!is_idle(sbi, DISCARD_TIME)) {
1693 				io_interrupted = true;
1694 				break;
1695 			}
1696 
1697 			__submit_discard_cmd(sbi, dpolicy, dc, &issued);
1698 
1699 			if (issued >= dpolicy->max_requests)
1700 				break;
1701 		}
1702 		blk_finish_plug(&plug);
1703 next:
1704 		mutex_unlock(&dcc->cmd_lock);
1705 
1706 		if (issued >= dpolicy->max_requests || io_interrupted ||
1707 					suspended)
1708 			break;
1709 	}
1710 
1711 	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1712 		__wait_all_discard_cmd(sbi, dpolicy);
1713 		goto retry;
1714 	}
1715 
1716 	if (!issued && io_interrupted)
1717 		issued = -1;
1718 
1719 	return issued;
1720 }
1721 
1722 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1723 {
1724 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1725 	struct list_head *pend_list;
1726 	struct discard_cmd *dc, *tmp;
1727 	int i;
1728 	bool dropped = false;
1729 
1730 	mutex_lock(&dcc->cmd_lock);
1731 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1732 		pend_list = &dcc->pend_list[i];
1733 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1734 			f2fs_bug_on(sbi, dc->state != D_PREP);
1735 			__remove_discard_cmd(sbi, dc);
1736 			dropped = true;
1737 		}
1738 	}
1739 	mutex_unlock(&dcc->cmd_lock);
1740 
1741 	return dropped;
1742 }
1743 
1744 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1745 {
1746 	__drop_discard_cmd(sbi);
1747 }
1748 
1749 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1750 							struct discard_cmd *dc)
1751 {
1752 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1753 	unsigned int len = 0;
1754 
1755 	wait_for_completion_io(&dc->wait);
1756 	mutex_lock(&dcc->cmd_lock);
1757 	f2fs_bug_on(sbi, dc->state != D_DONE);
1758 	dc->ref--;
1759 	if (!dc->ref) {
1760 		if (!dc->error)
1761 			len = dc->di.len;
1762 		__remove_discard_cmd(sbi, dc);
1763 	}
1764 	mutex_unlock(&dcc->cmd_lock);
1765 
1766 	return len;
1767 }
1768 
1769 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1770 						struct discard_policy *dpolicy,
1771 						block_t start, block_t end)
1772 {
1773 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1774 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1775 					&(dcc->fstrim_list) : &(dcc->wait_list);
1776 	struct discard_cmd *dc = NULL, *iter, *tmp;
1777 	unsigned int trimmed = 0;
1778 
1779 next:
1780 	dc = NULL;
1781 
1782 	mutex_lock(&dcc->cmd_lock);
1783 	list_for_each_entry_safe(iter, tmp, wait_list, list) {
1784 		if (iter->di.lstart + iter->di.len <= start ||
1785 					end <= iter->di.lstart)
1786 			continue;
1787 		if (iter->di.len < dpolicy->granularity)
1788 			continue;
1789 		if (iter->state == D_DONE && !iter->ref) {
1790 			wait_for_completion_io(&iter->wait);
1791 			if (!iter->error)
1792 				trimmed += iter->di.len;
1793 			__remove_discard_cmd(sbi, iter);
1794 		} else {
1795 			iter->ref++;
1796 			dc = iter;
1797 			break;
1798 		}
1799 	}
1800 	mutex_unlock(&dcc->cmd_lock);
1801 
1802 	if (dc) {
1803 		trimmed += __wait_one_discard_bio(sbi, dc);
1804 		goto next;
1805 	}
1806 
1807 	return trimmed;
1808 }
1809 
1810 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1811 						struct discard_policy *dpolicy)
1812 {
1813 	struct discard_policy dp;
1814 	unsigned int discard_blks;
1815 
1816 	if (dpolicy)
1817 		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1818 
1819 	/* wait all */
1820 	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
1821 	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1822 	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
1823 	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1824 
1825 	return discard_blks;
1826 }
1827 
1828 /* This should be covered by global mutex, &sit_i->sentry_lock */
1829 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1830 {
1831 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1832 	struct discard_cmd *dc;
1833 	bool need_wait = false;
1834 
1835 	mutex_lock(&dcc->cmd_lock);
1836 	dc = __lookup_discard_cmd(sbi, blkaddr);
1837 #ifdef CONFIG_BLK_DEV_ZONED
1838 	if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
1839 		int devi = f2fs_bdev_index(sbi, dc->bdev);
1840 
1841 		if (devi < 0) {
1842 			mutex_unlock(&dcc->cmd_lock);
1843 			return;
1844 		}
1845 
1846 		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1847 			/* force submit zone reset */
1848 			if (dc->state == D_PREP)
1849 				__submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
1850 							&dcc->wait_list, NULL);
1851 			dc->ref++;
1852 			mutex_unlock(&dcc->cmd_lock);
1853 			/* wait for the zone reset */
1854 			__wait_one_discard_bio(sbi, dc);
1855 			return;
1856 		}
1857 	}
1858 #endif
1859 	if (dc) {
1860 		if (dc->state == D_PREP) {
1861 			__punch_discard_cmd(sbi, dc, blkaddr);
1862 		} else {
1863 			dc->ref++;
1864 			need_wait = true;
1865 		}
1866 	}
1867 	mutex_unlock(&dcc->cmd_lock);
1868 
1869 	if (need_wait)
1870 		__wait_one_discard_bio(sbi, dc);
1871 }
1872 
1873 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1874 {
1875 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1876 
1877 	if (dcc && dcc->f2fs_issue_discard) {
1878 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1879 
1880 		dcc->f2fs_issue_discard = NULL;
1881 		kthread_stop(discard_thread);
1882 	}
1883 }
1884 
1885 /**
1886  * f2fs_issue_discard_timeout() - Issue all discard cmd within UMOUNT_DISCARD_TIMEOUT
1887  * @sbi: the f2fs_sb_info data for discard cmd to issue
1888  * @need_check: whether to verify that no discard commands remain afterwards
1889  *
1890  * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands are dropped.
1891  * Return: true if all discard commands were issued or none needed to be issued; otherwise false.
1892  */
1893 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi, bool need_check)
1894 {
1895 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1896 	struct discard_policy dpolicy;
1897 	bool dropped;
1898 
1899 	if (!atomic_read(&dcc->discard_cmd_cnt))
1900 		return true;
1901 
1902 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1903 					dcc->discard_granularity);
1904 	__issue_discard_cmd(sbi, &dpolicy);
1905 	dropped = __drop_discard_cmd(sbi);
1906 
1907 	/* just to make sure there are no pending discard commands */
1908 	__wait_all_discard_cmd(sbi, NULL);
1909 
1910 	f2fs_bug_on(sbi, need_check && atomic_read(&dcc->discard_cmd_cnt));
1911 	return !dropped;
1912 }
1913 
1914 static int issue_discard_thread(void *data)
1915 {
1916 	struct f2fs_sb_info *sbi = data;
1917 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1918 	wait_queue_head_t *q = &dcc->discard_wait_queue;
1919 	struct discard_policy dpolicy;
1920 	unsigned int wait_ms = dcc->min_discard_issue_time;
1921 	int issued;
1922 
1923 	set_freezable();
1924 
1925 	do {
1926 		wait_event_freezable_timeout(*q,
1927 				kthread_should_stop() || dcc->discard_wake,
1928 				msecs_to_jiffies(wait_ms));
1929 
1930 		if (sbi->gc_mode == GC_URGENT_HIGH ||
1931 			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
1932 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
1933 						MIN_DISCARD_GRANULARITY);
1934 		else
1935 			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1936 						dcc->discard_granularity);
1937 
1938 		if (dcc->discard_wake)
1939 			dcc->discard_wake = false;
1940 
1941 		/* clean up pending candidates before going to sleep */
1942 		if (atomic_read(&dcc->queued_discard))
1943 			__wait_all_discard_cmd(sbi, NULL);
1944 
1945 		if (f2fs_readonly(sbi->sb))
1946 			continue;
1947 		if (kthread_should_stop())
1948 			return 0;
1949 		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1950 			!atomic_read(&dcc->discard_cmd_cnt)) {
1951 			wait_ms = dpolicy.max_interval;
1952 			continue;
1953 		}
1954 
1955 		sb_start_intwrite(sbi->sb);
1956 
1957 		issued = __issue_discard_cmd(sbi, &dpolicy);
1958 		if (issued > 0) {
1959 			__wait_all_discard_cmd(sbi, &dpolicy);
1960 			wait_ms = dpolicy.min_interval;
1961 		} else if (issued == -1) {
1962 			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1963 			if (!wait_ms)
1964 				wait_ms = dpolicy.mid_interval;
1965 		} else {
1966 			wait_ms = dpolicy.max_interval;
1967 		}
1968 		if (!atomic_read(&dcc->discard_cmd_cnt))
1969 			wait_ms = dpolicy.max_interval;
1970 
1971 		sb_end_intwrite(sbi->sb);
1972 
1973 	} while (!kthread_should_stop());
1974 	return 0;
1975 }
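
/*
 * Summary of the sleep-interval policy above: a round that issued commands
 * sleeps for min_interval before the next round; issued == -1 (issuing was
 * interrupted by I/O awareness) waits out the remaining discard idle time,
 * falling back to mid_interval; an idle round or an empty command queue
 * backs off to max_interval.
 */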
1976 
1977 #ifdef CONFIG_BLK_DEV_ZONED
1978 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1979 		struct block_device *bdev, block_t blkstart, block_t blklen)
1980 {
1981 	sector_t sector, nr_sects;
1982 	block_t lblkstart = blkstart;
1983 	int devi = 0;
1984 	u64 remainder = 0;
1985 
1986 	if (f2fs_is_multi_device(sbi)) {
1987 		devi = f2fs_target_device_index(sbi, blkstart);
1988 		if (blkstart < FDEV(devi).start_blk ||
1989 		    blkstart > FDEV(devi).end_blk) {
1990 			f2fs_err(sbi, "Invalid block %x", blkstart);
1991 			return -EIO;
1992 		}
1993 		blkstart -= FDEV(devi).start_blk;
1994 	}
1995 
1996 	/* For sequential zones, reset the zone write pointer */
1997 	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1998 		sector = SECTOR_FROM_BLOCK(blkstart);
1999 		nr_sects = SECTOR_FROM_BLOCK(blklen);
2000 		div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);
2001 
2002 		if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
2003 			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
2004 				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
2005 				 blkstart, blklen);
2006 			return -EIO;
2007 		}
2008 
2009 		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
2010 			unsigned int nofs_flags;
2011 			int ret;
2012 
2013 			trace_f2fs_issue_reset_zone(bdev, blkstart);
2014 			nofs_flags = memalloc_nofs_save();
2015 			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
2016 						sector, nr_sects);
2017 			memalloc_nofs_restore(nofs_flags);
2018 			return ret;
2019 		}
2020 
2021 		__queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
2022 		return 0;
2023 	}
2024 
2025 	/* For conventional zones, use regular discard if supported */
2026 	__queue_discard_cmd(sbi, bdev, lblkstart, blklen);
2027 	return 0;
2028 }
2029 #endif
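
/*
 * A minimal sketch of the zone-reset alignment rule enforced above; the
 * zone geometry below (2MB zones, 512-byte sectors, 4KB blocks) is only an
 * example:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	// A reset must start on a zone boundary and span exactly one zone.
 *	static bool zone_reset_is_aligned(uint64_t sector, uint64_t nr_sects,
 *					  uint64_t zone_sectors)
 *	{
 *		return (sector % zone_sectors) == 0 && nr_sects == zone_sectors;
 *	}
 *
 *	// zone_sectors = 4096: block 512 maps to sector 4096, so
 *	// zone_reset_is_aligned(4096, 4096, 4096) -> true, while a reset
 *	// starting at block 500 (sector 4000) is rejected with -EIO.
 */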
2030 
2031 static int __issue_discard_async(struct f2fs_sb_info *sbi,
2032 		struct block_device *bdev, block_t blkstart, block_t blklen)
2033 {
2034 #ifdef CONFIG_BLK_DEV_ZONED
2035 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
2036 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
2037 #endif
2038 	__queue_discard_cmd(sbi, bdev, blkstart, blklen);
2039 	return 0;
2040 }
2041 
2042 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
2043 				block_t blkstart, block_t blklen)
2044 {
2045 	sector_t start = blkstart, len = 0;
2046 	struct block_device *bdev;
2047 	struct seg_entry *se;
2048 	unsigned int offset;
2049 	block_t i;
2050 	int err = 0;
2051 
2052 	bdev = f2fs_target_device(sbi, blkstart, NULL);
2053 
2054 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
2055 		if (i != start) {
2056 			struct block_device *bdev2 =
2057 				f2fs_target_device(sbi, i, NULL);
2058 
2059 			if (bdev2 != bdev) {
2060 				err = __issue_discard_async(sbi, bdev,
2061 						start, len);
2062 				if (err)
2063 					return err;
2064 				bdev = bdev2;
2065 				start = i;
2066 				len = 0;
2067 			}
2068 		}
2069 
2070 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
2071 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
2072 
2073 		if (f2fs_block_unit_discard(sbi) &&
2074 				!f2fs_test_and_set_bit(offset, se->discard_map))
2075 			sbi->discard_blks--;
2076 	}
2077 
2078 	if (len)
2079 		err = __issue_discard_async(sbi, bdev, start, len);
2080 	return err;
2081 }
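
/*
 * Worked example of the device split above (the two-device layout is
 * hypothetical): with devices covering blocks [0..99] and [100..199],
 * f2fs_issue_discard(sbi, 95, 10) issues two async discards, (start 95,
 * len 5) on the first device and (start 100, len 5) on the second, because
 * f2fs_target_device() returns a different bdev once block 100 is reached.
 */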
2082 
2083 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
2084 							bool check_only)
2085 {
2086 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2087 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
2088 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2089 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2090 	unsigned long *discard_map = (unsigned long *)se->discard_map;
2091 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
2092 	unsigned int start = 0, end = -1;
2093 	bool force = (cpc->reason & CP_DISCARD);
2094 	struct discard_entry *de = NULL;
2095 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
2096 	int i;
2097 
2098 	if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
2099 	    !f2fs_hw_support_discard(sbi) ||
2100 	    !f2fs_block_unit_discard(sbi))
2101 		return false;
2102 
2103 	if (!force) {
2104 		if (!f2fs_realtime_discard_enable(sbi) ||
2105 			(!se->valid_blocks &&
2106 				!is_curseg(sbi, cpc->trim_start)) ||
2107 			SM_I(sbi)->dcc_info->nr_discards >=
2108 				SM_I(sbi)->dcc_info->max_discards)
2109 			return false;
2110 	}
2111 
2112		/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
2113 	for (i = 0; i < entries; i++)
2114 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
2115 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
2116 
2117 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
2118 				SM_I(sbi)->dcc_info->max_discards) {
2119 		start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
2120 		if (start >= BLKS_PER_SEG(sbi))
2121 			break;
2122 
2123 		end = __find_rev_next_zero_bit(dmap,
2124 						BLKS_PER_SEG(sbi), start + 1);
2125 		if (force && start && end != BLKS_PER_SEG(sbi) &&
2126 		    (end - start) < cpc->trim_minlen)
2127 			continue;
2128 
2129 		if (check_only)
2130 			return true;
2131 
2132 		if (!de) {
2133 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
2134 						GFP_F2FS_ZERO, true, NULL);
2135 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
2136 			list_add_tail(&de->list, head);
2137 		}
2138 
2139 		for (i = start; i < end; i++)
2140 			__set_bit_le(i, (void *)de->discard_map);
2141 
2142 		SM_I(sbi)->dcc_info->nr_discards += end - start;
2143 	}
2144 	return false;
2145 }
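
/*
 * Worked example of the dmap computation above (4-bit maps for brevity;
 * real maps are SIT_VBLOCK_MAP_SIZE bytes): with cur_map = 1010 and
 * ckpt_map = 1100, the non-force case gives
 * (cur_map ^ ckpt_map) & ckpt_map = 0110 & 1100 = 0100, i.e. only a block
 * that was valid at the last checkpoint but has been invalidated since
 * becomes a discard candidate.  In the force (fstrim) case,
 * ~ckpt_map & ~discard_map also covers blocks that were already free at
 * checkpoint time, unless they have been discarded before.
 */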
2146 
2147 static void release_discard_addr(struct discard_entry *entry)
2148 {
2149 	list_del(&entry->list);
2150 	kmem_cache_free(discard_entry_slab, entry);
2151 }
2152 
2153 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2154 {
2155 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2156 	struct discard_entry *entry, *this;
2157 
2158 	/* drop caches */
2159 	list_for_each_entry_safe(entry, this, head, list)
2160 		release_discard_addr(entry);
2161 }
2162 
2163 /*
2164  * Should call f2fs_clear_prefree_segments after checkpoint is done.
2165  */
2166 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2167 {
2168 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2169 	unsigned int segno;
2170 
2171 	mutex_lock(&dirty_i->seglist_lock);
2172 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2173 		__set_test_and_free(sbi, segno, false);
2174 	mutex_unlock(&dirty_i->seglist_lock);
2175 }
2176 
2177 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2178 						struct cp_control *cpc)
2179 {
2180 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2181 	struct list_head *head = &dcc->entry_list;
2182 	struct discard_entry *entry, *this;
2183 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2184 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2185 	unsigned int start = 0, end = -1;
2186 	unsigned int secno, start_segno;
2187 	bool force = (cpc->reason & CP_DISCARD);
2188 	bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2189 						DISCARD_UNIT_SECTION;
2190 
2191 	if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2192 		section_alignment = true;
2193 
2194 	mutex_lock(&dirty_i->seglist_lock);
2195 
2196 	while (1) {
2197 		int i;
2198 
2199 		if (section_alignment && end != -1)
2200 			end--;
2201 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2202 		if (start >= MAIN_SEGS(sbi))
2203 			break;
2204 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2205 								start + 1);
2206 
2207 		if (section_alignment) {
2208 			start = rounddown(start, SEGS_PER_SEC(sbi));
2209 			end = roundup(end, SEGS_PER_SEC(sbi));
2210 		}
2211 
2212 		for (i = start; i < end; i++) {
2213 			if (test_and_clear_bit(i, prefree_map))
2214 				dirty_i->nr_dirty[PRE]--;
2215 		}
2216 
2217 		if (!f2fs_realtime_discard_enable(sbi))
2218 			continue;
2219 
2220 		if (force && start >= cpc->trim_start &&
2221 					(end - 1) <= cpc->trim_end)
2222 			continue;
2223 
2224 		/* Should cover 2MB zoned device for zone-based reset */
2225 		if (!f2fs_sb_has_blkzoned(sbi) &&
2226 		    (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
2227 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2228 				SEGS_TO_BLKS(sbi, end - start));
2229 			continue;
2230 		}
2231 next:
2232 		secno = GET_SEC_FROM_SEG(sbi, start);
2233 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
2234 		if (!is_cursec(sbi, secno) &&
2235 			!get_valid_blocks(sbi, start, true))
2236 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2237 						BLKS_PER_SEC(sbi));
2238 
2239 		start = start_segno + SEGS_PER_SEC(sbi);
2240 		if (start < end)
2241 			goto next;
2242 		else
2243 			end = start - 1;
2244 	}
2245 	mutex_unlock(&dirty_i->seglist_lock);
2246 
2247 	if (!f2fs_block_unit_discard(sbi))
2248 		goto wakeup;
2249 
2250 	/* send small discards */
2251 	list_for_each_entry_safe(entry, this, head, list) {
2252 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2253 		bool is_valid = test_bit_le(0, entry->discard_map);
2254 
2255 find_next:
2256 		if (is_valid) {
2257 			next_pos = find_next_zero_bit_le(entry->discard_map,
2258 						BLKS_PER_SEG(sbi), cur_pos);
2259 			len = next_pos - cur_pos;
2260 
2261 			if (f2fs_sb_has_blkzoned(sbi) ||
2262 			    (force && len < cpc->trim_minlen))
2263 				goto skip;
2264 
2265 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2266 									len);
2267 			total_len += len;
2268 		} else {
2269 			next_pos = find_next_bit_le(entry->discard_map,
2270 						BLKS_PER_SEG(sbi), cur_pos);
2271 		}
2272 skip:
2273 		cur_pos = next_pos;
2274 		is_valid = !is_valid;
2275 
2276 		if (cur_pos < BLKS_PER_SEG(sbi))
2277 			goto find_next;
2278 
2279 		release_discard_addr(entry);
2280 		dcc->nr_discards -= total_len;
2281 	}
2282 
2283 wakeup:
2284 	wake_up_discard_thread(sbi, false);
2285 }
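
/*
 * Example of the small-discard walk above (a 16-bit map for brevity): for
 * discard_map = 1110 0000 0111 1000, the find_next loop alternates between
 * set and clear runs, issuing discards for block runs [0, 3) and [10, 14)
 * (each subject to cpc->trim_minlen when forced) and merely skipping over
 * the clear runs in between.
 */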
2286 
2287 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2288 {
2289 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2290 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2291 	int err = 0;
2292 
2293 	if (f2fs_sb_has_readonly(sbi)) {
2294 		f2fs_info(sbi,
2295 			"Skip to start discard thread for readonly image");
2296 		return 0;
2297 	}
2298 
2299 	if (!f2fs_realtime_discard_enable(sbi))
2300 		return 0;
2301 
2302 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2303 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2304 	if (IS_ERR(dcc->f2fs_issue_discard)) {
2305 		err = PTR_ERR(dcc->f2fs_issue_discard);
2306 		dcc->f2fs_issue_discard = NULL;
2307 	}
2308 
2309 	return err;
2310 }
2311 
2312 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2313 {
2314 	struct discard_cmd_control *dcc;
2315 	int err = 0, i;
2316 
2317 	if (SM_I(sbi)->dcc_info) {
2318 		dcc = SM_I(sbi)->dcc_info;
2319 		goto init_thread;
2320 	}
2321 
2322 	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2323 	if (!dcc)
2324 		return -ENOMEM;
2325 
2326 	dcc->discard_io_aware_gran = MAX_PLIST_NUM;
2327 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2328 	dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
2329 	dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE;
2330 	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT ||
2331 		F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2332 		dcc->discard_granularity = BLKS_PER_SEG(sbi);
2333 
2334 	INIT_LIST_HEAD(&dcc->entry_list);
2335 	for (i = 0; i < MAX_PLIST_NUM; i++)
2336 		INIT_LIST_HEAD(&dcc->pend_list[i]);
2337 	INIT_LIST_HEAD(&dcc->wait_list);
2338 	INIT_LIST_HEAD(&dcc->fstrim_list);
2339 	mutex_init(&dcc->cmd_lock);
2340 	atomic_set(&dcc->issued_discard, 0);
2341 	atomic_set(&dcc->queued_discard, 0);
2342 	atomic_set(&dcc->discard_cmd_cnt, 0);
2343 	dcc->nr_discards = 0;
2344 	dcc->max_discards = SEGS_TO_BLKS(sbi, MAIN_SEGS(sbi));
2345 	dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
2346 	dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
2347 	dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
2348 	dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
2349 	dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
2350 	dcc->undiscard_blks = 0;
2351 	dcc->next_pos = 0;
2352 	dcc->root = RB_ROOT_CACHED;
2353 	dcc->rbtree_check = false;
2354 
2355 	init_waitqueue_head(&dcc->discard_wait_queue);
2356 	SM_I(sbi)->dcc_info = dcc;
2357 init_thread:
2358 	err = f2fs_start_discard_thread(sbi);
2359 	if (err) {
2360 		kfree(dcc);
2361 		SM_I(sbi)->dcc_info = NULL;
2362 	}
2363 
2364 	return err;
2365 }
2366 
2367 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2368 {
2369 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2370 
2371 	if (!dcc)
2372 		return;
2373 
2374 	f2fs_stop_discard_thread(sbi);
2375 
2376 	/*
2377	 * Recovery can cache discard commands, so the error path of
2378	 * fill_super() needs to give them a chance to be issued.
2379 	 */
2380 	f2fs_issue_discard_timeout(sbi, true);
2381 
2382 	kfree(dcc);
2383 	SM_I(sbi)->dcc_info = NULL;
2384 }
2385 
2386 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2387 {
2388 	struct sit_info *sit_i = SIT_I(sbi);
2389 
2390 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2391 		sit_i->dirty_sentries++;
2392 		return false;
2393 	}
2394 
2395 	return true;
2396 }
2397 
2398 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2399 					unsigned int segno, int modified)
2400 {
2401 	struct seg_entry *se = get_seg_entry(sbi, segno);
2402 
2403 	se->type = type;
2404 	if (modified)
2405 		__mark_sit_entry_dirty(sbi, segno);
2406 }
2407 
2408 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2409 								block_t blkaddr)
2410 {
2411 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2412 
2413 	if (segno == NULL_SEGNO)
2414 		return 0;
2415 	return get_seg_entry(sbi, segno)->mtime;
2416 }
2417 
2418 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2419 						unsigned long long old_mtime)
2420 {
2421 	struct seg_entry *se;
2422 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2423 	unsigned long long ctime = get_mtime(sbi, false);
2424 	unsigned long long mtime = old_mtime ? old_mtime : ctime;
2425 
2426 	if (segno == NULL_SEGNO)
2427 		return;
2428 
2429 	se = get_seg_entry(sbi, segno);
2430 
2431 	if (!se->mtime)
2432 		se->mtime = mtime;
2433 	else
2434 		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2435 						se->valid_blocks + 1);
2436 
2437 	if (ctime > SIT_I(sbi)->max_mtime)
2438 		SIT_I(sbi)->max_mtime = ctime;
2439 }
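
/*
 * The update above keeps se->mtime as the running mean of the modification
 * times of the blocks in the segment.  A sketch of the recurrence, with
 * made-up numbers:
 *
 *	// mean of n+1 samples from the mean of n samples:
 *	//	mean' = (mean * n + x) / (n + 1)
 *	// e.g. mean = 100 over n = 3 valid blocks and a new mtime x = 140:
 *	//	mean' = (100 * 3 + 140) / 4 = 110
 */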
2440 
2441 /*
2442  * NOTE: when updating multiple blocks at the same time, please ensure
2443  * that the consecutive input blocks belong to the same segment.
2444  */
2445 static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se,
2446 				unsigned int segno, block_t blkaddr, unsigned int offset, int del)
2447 {
2448 	bool exist;
2449 #ifdef CONFIG_F2FS_CHECK_FS
2450 	bool mir_exist;
2451 #endif
2452 	int i;
2453 	int del_count = -del;
2454 
2455 	f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1));
2456 
2457 	for (i = 0; i < del_count; i++) {
2458 		exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map);
2459 #ifdef CONFIG_F2FS_CHECK_FS
2460 		mir_exist = f2fs_test_and_clear_bit(offset + i,
2461 						se->cur_valid_map_mir);
2462 		if (unlikely(exist != mir_exist)) {
2463 			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2464 				blkaddr + i, exist);
2465 			f2fs_bug_on(sbi, 1);
2466 		}
2467 #endif
2468 		if (unlikely(!exist)) {
2469 			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i);
2470 			f2fs_bug_on(sbi, 1);
2471 			se->valid_blocks++;
2472 			del += 1;
2473 		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2474 			/*
2475 			 * If checkpoints are off, we must not reuse data that
2476 			 * was used in the previous checkpoint. If it was used
2477 			 * before, we must track that to know how much space we
2478 			 * really have.
2479 			 */
2480 			if (f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
2481 				spin_lock(&sbi->stat_lock);
2482 				sbi->unusable_block_count++;
2483 				spin_unlock(&sbi->stat_lock);
2484 			}
2485 		}
2486 
2487 		if (f2fs_block_unit_discard(sbi) &&
2488 				f2fs_test_and_clear_bit(offset + i, se->discard_map))
2489 			sbi->discard_blks++;
2490 
2491 		if (!f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
2492 			se->ckpt_valid_blocks -= 1;
2493 			if (__is_large_section(sbi))
2494 				get_sec_entry(sbi, segno)->ckpt_valid_blocks -= 1;
2495 		}
2496 	}
2497 
2498 	if (__is_large_section(sbi))
2499 		sanity_check_valid_blocks(sbi, segno);
2500 
2501 	return del;
2502 }
2503 
2504 static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se,
2505 				unsigned int segno, block_t blkaddr, unsigned int offset, int del)
2506 {
2507 	bool exist;
2508 #ifdef CONFIG_F2FS_CHECK_FS
2509 	bool mir_exist;
2510 #endif
2511 
2512 	exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2513 #ifdef CONFIG_F2FS_CHECK_FS
2514 	mir_exist = f2fs_test_and_set_bit(offset,
2515 					se->cur_valid_map_mir);
2516 	if (unlikely(exist != mir_exist)) {
2517 		f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2518 			blkaddr, exist);
2519 		f2fs_bug_on(sbi, 1);
2520 	}
2521 #endif
2522 	if (unlikely(exist)) {
2523 		f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr);
2524 		f2fs_bug_on(sbi, 1);
2525 		se->valid_blocks--;
2526 		del = 0;
2527 	}
2528 
2529 	if (f2fs_block_unit_discard(sbi) &&
2530 			!f2fs_test_and_set_bit(offset, se->discard_map))
2531 		sbi->discard_blks--;
2532 
2533 	/*
2534	 * SSR should never reuse a block which is checkpointed
2535 	 * or newly invalidated.
2536 	 */
2537 	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2538 		if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) {
2539 			se->ckpt_valid_blocks++;
2540 			if (__is_large_section(sbi))
2541 				get_sec_entry(sbi, segno)->ckpt_valid_blocks++;
2542 		}
2543 	}
2544 
2545 	if (!f2fs_test_bit(offset, se->ckpt_valid_map)) {
2546 		se->ckpt_valid_blocks += del;
2547 		if (__is_large_section(sbi))
2548 			get_sec_entry(sbi, segno)->ckpt_valid_blocks += del;
2549 	}
2550 
2551 	if (__is_large_section(sbi))
2552 		sanity_check_valid_blocks(sbi, segno);
2553 
2554 	return del;
2555 }
2556 
2557 /*
2558	 * When releasing blocks, this function can update multiple consecutive blocks
2559	 * at one time, but note that all of those consecutive blocks must belong to
2560	 * the same segment.
2561  */
2562 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2563 {
2564 	struct seg_entry *se;
2565 	unsigned int segno, offset;
2566 	long int new_vblocks;
2567 
2568 	segno = GET_SEGNO(sbi, blkaddr);
2569 	if (segno == NULL_SEGNO)
2570 		return;
2571 
2572 	se = get_seg_entry(sbi, segno);
2573 	new_vblocks = se->valid_blocks + del;
2574 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2575 
2576 	f2fs_bug_on(sbi, (new_vblocks < 0 ||
2577 			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2578 
2579 	se->valid_blocks = new_vblocks;
2580 
2581 	/* Update valid block bitmap */
2582 	if (del > 0) {
2583 		del = update_sit_entry_for_alloc(sbi, se, segno, blkaddr, offset, del);
2584 	} else {
2585 		del = update_sit_entry_for_release(sbi, se, segno, blkaddr, offset, del);
2586 	}
2587 
2588 	__mark_sit_entry_dirty(sbi, segno);
2589 
2590 	/* update total number of valid blocks to be written in ckpt area */
2591 	SIT_I(sbi)->written_valid_blocks += del;
2592 
2593 	if (__is_large_section(sbi))
2594 		get_sec_entry(sbi, segno)->valid_blocks += del;
2595 }
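
/*
 * Usage note (callers shown are illustrative): @del > 0 marks one newly
 * allocated block valid, e.g. update_sit_entry(sbi, blkaddr, 1) after an
 * allocation, while @del < 0 releases -@del consecutive blocks, e.g.
 * update_sit_entry(sbi, blkaddr, -8) to invalidate eight blocks that all
 * live in the same segment, as the comment above requires.
 */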
2596 
2597 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
2598 				unsigned int len)
2599 {
2600 	unsigned int segno = GET_SEGNO(sbi, addr);
2601 	struct sit_info *sit_i = SIT_I(sbi);
2602 	block_t addr_start = addr, addr_end = addr + len - 1;
2603 	unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1;
2604 	unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt;
2605 
2606 	f2fs_bug_on(sbi, addr == NULL_ADDR);
2607 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2608 		return;
2609 
2610 	f2fs_invalidate_internal_cache(sbi, addr, len);
2611 
2612 	/* add it into sit main buffer */
2613 	down_write(&sit_i->sentry_lock);
2614 
2615 	if (seg_num == 1)
2616 		cnt = len;
2617 	else
2618 		cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr);
2619 
2620 	do {
2621 		update_segment_mtime(sbi, addr_start, 0);
2622 		update_sit_entry(sbi, addr_start, -cnt);
2623 
2624 		/* add it into dirty seglist */
2625 		locate_dirty_segment(sbi, segno);
2626 
2627			/* update @addr_start, @cnt and @segno */
2628 		addr_start = START_BLOCK(sbi, ++segno);
2629 		if (++i == seg_num)
2630 			cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1;
2631 		else
2632 			cnt = max_blocks;
2633 	} while (i <= seg_num);
2634 
2635 	up_write(&sit_i->sentry_lock);
2636 }
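
/*
 * Worked example of the per-segment split above (512 blocks per segment is
 * assumed for the arithmetic): invalidating len = 1000 blocks starting at
 * block offset 200 of a segment touches seg_num = 3 segments.  The first
 * pass releases cnt = 512 - 200 = 312 blocks, the second a full segment of
 * 512, and the last GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1 = 176 blocks:
 * 312 + 512 + 176 = 1000 in total.
 */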
2637 
2638 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2639 {
2640 	struct sit_info *sit_i = SIT_I(sbi);
2641 	unsigned int segno, offset;
2642 	struct seg_entry *se;
2643 	bool is_cp = false;
2644 
2645 	if (!__is_valid_data_blkaddr(blkaddr))
2646 		return true;
2647 
2648 	down_read(&sit_i->sentry_lock);
2649 
2650 	segno = GET_SEGNO(sbi, blkaddr);
2651 	se = get_seg_entry(sbi, segno);
2652 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2653 
2654 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2655 		is_cp = true;
2656 
2657 	up_read(&sit_i->sentry_lock);
2658 
2659 	return is_cp;
2660 }
2661 
2662 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
2663 {
2664 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2665 
2666 	if (sbi->ckpt->alloc_type[type] == SSR)
2667 		return BLKS_PER_SEG(sbi);
2668 	return curseg->next_blkoff;
2669 }
2670 
2671 /*
2672  * Calculate the number of current summary pages for writing
2673  */
2674 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2675 {
2676 	int valid_sum_count = 0;
2677 	int i, sum_in_page;
2678 
2679 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2680 		if (sbi->ckpt->alloc_type[i] != SSR && for_ra)
2681 			valid_sum_count +=
2682 				le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2683 		else
2684 			valid_sum_count += f2fs_curseg_valid_blocks(sbi, i);
2685 	}
2686 
2687 	sum_in_page = (sbi->blocksize - 2 * sbi->sum_journal_size -
2688 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2689 	if (valid_sum_count <= sum_in_page)
2690 		return 1;
2691 	else if ((valid_sum_count - sum_in_page) <=
2692 		(sbi->blocksize - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2693 		return 2;
2694 	return 3;
2695 }
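
/*
 * Worked example of the page-count math above (the sizes are illustrative;
 * with 4KB blocks, SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5 and a journal of
 * 507 bytes): a page carrying both journals holds
 * (4096 - 2 * 507 - 5) / 7 = 439 entries and a journal-free page holds
 * (4096 - 5) / 7 = 584.  Three data logs with 512 valid summaries each
 * (1536 in total) need three pages, since 1536 > 439 and 1536 - 439 > 584,
 * whereas a total of 400 would fit in one.
 */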
2696 
2697 /*
2698  * Caller should put this summary folio
2699  */
2700 struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno)
2701 {
2702 	if (unlikely(f2fs_cp_error(sbi)))
2703 		return ERR_PTR(-EIO);
2704 	return f2fs_get_meta_folio_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2705 }
2706 
2707 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2708 					void *src, block_t blk_addr)
2709 {
2710 	struct folio *folio;
2711 
2712 	if (!f2fs_sb_has_packed_ssa(sbi))
2713 		folio = f2fs_grab_meta_folio(sbi, blk_addr);
2714 	else
2715 		folio = f2fs_get_meta_folio_retry(sbi, blk_addr);
2716 
2717 	if (IS_ERR(folio))
2718 		return;
2719 
2720 	memcpy(folio_address(folio), src, PAGE_SIZE);
2721 	folio_mark_dirty(folio);
2722 	f2fs_folio_put(folio, true);
2723 }
2724 
2725 static void write_sum_page(struct f2fs_sb_info *sbi,
2726 		struct f2fs_summary_block *sum_blk, unsigned int segno)
2727 {
2728 	struct folio *folio;
2729 
2730 	if (!f2fs_sb_has_packed_ssa(sbi))
2731 		return f2fs_update_meta_page(sbi, (void *)sum_blk,
2732 				GET_SUM_BLOCK(sbi, segno));
2733 
2734 	folio = f2fs_get_sum_folio(sbi, segno);
2735 	if (IS_ERR(folio))
2736 		return;
2737 
2738 	memcpy(SUM_BLK_PAGE_ADDR(sbi, folio, segno), sum_blk,
2739 			sbi->sum_blocksize);
2740 	folio_mark_dirty(folio);
2741 	f2fs_folio_put(folio, true);
2742 }
2743 
2744 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2745 						int type, block_t blk_addr)
2746 {
2747 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2748 	struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr);
2749 	struct f2fs_summary_block *src = curseg->sum_blk;
2750 	struct f2fs_summary_block *dst;
2751 
2752 	dst = folio_address(folio);
2753 	memset(dst, 0, PAGE_SIZE);
2754 
2755 	mutex_lock(&curseg->curseg_mutex);
2756 
2757 	down_read(&curseg->journal_rwsem);
2758 	memcpy(sum_journal(sbi, dst), curseg->journal, sbi->sum_journal_size);
2759 	up_read(&curseg->journal_rwsem);
2760 
2761 	memcpy(sum_entries(dst), sum_entries(src), sbi->sum_entry_size);
2762 	memcpy(sum_footer(sbi, dst), sum_footer(sbi, src), SUM_FOOTER_SIZE);
2763 
2764 	mutex_unlock(&curseg->curseg_mutex);
2765 
2766 	folio_mark_dirty(folio);
2767 	f2fs_folio_put(folio, true);
2768 }
2769 
2770 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2771 				struct curseg_info *curseg)
2772 {
2773 	unsigned int segno = curseg->segno + 1;
2774 	struct free_segmap_info *free_i = FREE_I(sbi);
2775 
2776 	if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
2777 		return !test_bit(segno, free_i->free_segmap);
2778 	return 0;
2779 }
2780 
2781 /*
2782	 * Find a new segment in the free segment bitmap in the right order.
2783	 * This function should always succeed; otherwise, it is a BUG.
2784  */
2785 static int get_new_segment(struct f2fs_sb_info *sbi,
2786 			unsigned int *newseg, bool new_sec, bool pinning)
2787 {
2788 	struct free_segmap_info *free_i = FREE_I(sbi);
2789 	unsigned int segno, secno, zoneno;
2790 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2791 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2792 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2793 	unsigned int alloc_policy = sbi->allocate_section_policy;
2794 	unsigned int alloc_hint = sbi->allocate_section_hint;
2795 	bool init = true;
2796 	int i;
2797 	int ret = 0;
2798 
2799 	spin_lock(&free_i->segmap_lock);
2800 
2801 	if (time_to_inject(sbi, FAULT_NO_SEGMENT)) {
2802 		ret = -ENOSPC;
2803 		goto out_unlock;
2804 	}
2805 
2806 	if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
2807 		segno = find_next_zero_bit(free_i->free_segmap,
2808 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2809 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2810 			goto got_it;
2811 	}
2812 
2813 #ifdef CONFIG_BLK_DEV_ZONED
2814 	/*
2815	 * If f2fs is formatted on zoned storage, try to get pinned sections
2816	 * from the beginning of the storage, which should be a conventional zone.
2817 	 */
2818 	if (f2fs_sb_has_blkzoned(sbi)) {
2819 		/* Prioritize writing to conventional zones */
2820 		if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
2821 			segno = 0;
2822 		else
2823 			segno = max(sbi->first_seq_zone_segno, *newseg);
2824 		hint = GET_SEC_FROM_SEG(sbi, segno);
2825 	}
2826 #endif
2827 
2828 	/*
2829 	 * Prevent allocate_section_hint from exceeding MAIN_SECS()
2830 	 * due to desynchronization.
2831 	 */
2832 	if (alloc_policy != ALLOCATE_FORWARD_NOHINT &&
2833 		alloc_hint > MAIN_SECS(sbi))
2834 		alloc_hint = MAIN_SECS(sbi);
2835 
2836 	if (alloc_policy == ALLOCATE_FORWARD_FROM_HINT &&
2837 		hint < alloc_hint)
2838 		hint = alloc_hint;
2839 	else if (alloc_policy == ALLOCATE_FORWARD_WITHIN_HINT &&
2840 			hint >= alloc_hint)
2841 		hint = 0;
2842 
2843 find_other_zone:
2844 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2845 
2846 #ifdef CONFIG_BLK_DEV_ZONED
2847 	if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
2848 		/* Write only to sequential zones */
2849 		if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
2850 			hint = GET_SEC_FROM_SEG(sbi, sbi->first_seq_zone_segno);
2851 			secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2852 		} else
2853 			secno = find_first_zero_bit(free_i->free_secmap,
2854 								MAIN_SECS(sbi));
2855 		if (secno >= MAIN_SECS(sbi)) {
2856 			ret = -ENOSPC;
2857 			f2fs_bug_on(sbi, 1);
2858 			goto out_unlock;
2859 		}
2860 	}
2861 #endif
2862 
2863 	if (secno >= MAIN_SECS(sbi)) {
2864 		secno = find_first_zero_bit(free_i->free_secmap,
2865 							MAIN_SECS(sbi));
2866 		if (secno >= MAIN_SECS(sbi)) {
2867 			ret = -ENOSPC;
2868 			f2fs_bug_on(sbi, !pinning);
2869 			goto out_unlock;
2870 		}
2871 	}
2872 	segno = GET_SEG_FROM_SEC(sbi, secno);
2873 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2874 
2875 	/* give up on finding another zone */
2876 	if (!init)
2877 		goto got_it;
2878 	if (sbi->secs_per_zone == 1)
2879 		goto got_it;
2880 	if (zoneno == old_zoneno)
2881 		goto got_it;
2882 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2883 		if (CURSEG_I(sbi, i)->zone == zoneno)
2884 			break;
2885 
2886 	if (i < NR_CURSEG_TYPE) {
2887			/* zone is in use, try another */
2888 		if (zoneno + 1 >= total_zones)
2889 			hint = 0;
2890 		else
2891 			hint = (zoneno + 1) * sbi->secs_per_zone;
2892 		init = false;
2893 		goto find_other_zone;
2894 	}
2895 got_it:
2896 	/* set it as dirty segment in free segmap */
2897 	if (test_bit(segno, free_i->free_segmap)) {
2898 		ret = -EFSCORRUPTED;
2899 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_CORRUPTED_FREE_BITMAP);
2900 		goto out_unlock;
2901 	}
2902 
2903 	/* no free section in conventional device or conventional zone */
2904 	if (new_sec && pinning &&
2905 		f2fs_is_sequential_zone_area(sbi, START_BLOCK(sbi, segno))) {
2906 		ret = -EAGAIN;
2907 		goto out_unlock;
2908 	}
2909 	__set_inuse(sbi, segno);
2910 	*newseg = segno;
2911 out_unlock:
2912 	spin_unlock(&free_i->segmap_lock);
2913 
2914 	if (ret == -ENOSPC && !pinning)
2915 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
2916 	return ret;
2917 }
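
/*
 * Hint-policy example for the helper above (alloc_hint = 100 is made up):
 * ALLOCATE_FORWARD_FROM_HINT starts the free-secmap search no lower than
 * section 100, ALLOCATE_FORWARD_WITHIN_HINT restarts from section 0 once
 * the search hint has already reached section 100, and
 * ALLOCATE_FORWARD_NOHINT leaves the hint untouched.
 */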
2918 
2919 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2920 {
2921 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2922 	struct summary_footer *sum_footer;
2923 	unsigned short seg_type = curseg->seg_type;
2924 
2925		/* only happens when get_new_segment() fails */
2926 	if (curseg->next_segno == NULL_SEGNO)
2927 		return;
2928 
2929 	curseg->inited = true;
2930 	curseg->segno = curseg->next_segno;
2931 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2932 	curseg->next_blkoff = 0;
2933 	curseg->next_segno = NULL_SEGNO;
2934 
2935 	sum_footer = sum_footer(sbi, curseg->sum_blk);
2936 	memset(sum_footer, 0, sizeof(struct summary_footer));
2937 
2938 	sanity_check_seg_type(sbi, seg_type);
2939 
2940 	if (IS_DATASEG(seg_type))
2941 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2942 	if (IS_NODESEG(seg_type))
2943 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2944 	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2945 }
2946 
2947 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2948 {
2949 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2950 	unsigned short seg_type = curseg->seg_type;
2951 
2952 	sanity_check_seg_type(sbi, seg_type);
2953 	if (__is_large_section(sbi)) {
2954 		if (f2fs_need_rand_seg(sbi)) {
2955 			unsigned int hint = GET_SEC_FROM_SEG(sbi, curseg->segno);
2956 
2957 			if (GET_SEC_FROM_SEG(sbi, curseg->segno + 1) != hint)
2958 				return curseg->segno;
2959 			return get_random_u32_inclusive(curseg->segno + 1,
2960 					GET_SEG_FROM_SEC(sbi, hint + 1) - 1);
2961 		}
2962 		return curseg->segno;
2963 	} else if (f2fs_need_rand_seg(sbi)) {
2964 		return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
2965 	}
2966 
2967		/* an inmem log may not be located on any segment after mount */
2968 	if (!curseg->inited)
2969 		return 0;
2970 
2971 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2972 		return 0;
2973 
2974 	if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
2975 		return 0;
2976 
2977 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2978 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2979 
2980 	/* find segments from 0 to reuse freed segments */
2981 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2982 		return 0;
2983 
2984 	return curseg->segno;
2985 }
2986 
2987 static void reset_curseg_fields(struct curseg_info *curseg)
2988 {
2989 	curseg->inited = false;
2990 	curseg->segno = NULL_SEGNO;
2991 	curseg->next_segno = 0;
2992 }
2993 
2994 /*
2995  * Allocate a current working segment.
2996  * This function always allocates a free segment in LFS manner.
2997  */
2998 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2999 {
3000 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3001 	unsigned int segno = curseg->segno;
3002 	bool pinning = type == CURSEG_COLD_DATA_PINNED;
3003 	int ret;
3004 
3005 	if (curseg->inited)
3006 		write_sum_page(sbi, curseg->sum_blk, segno);
3007 
3008 	segno = __get_next_segno(sbi, type);
3009 	ret = get_new_segment(sbi, &segno, new_sec, pinning);
3010 	if (ret) {
3011 		if (ret == -ENOSPC)
3012 			reset_curseg_fields(curseg);
3013 		return ret;
3014 	}
3015 
3016 	curseg->next_segno = segno;
3017 	reset_curseg(sbi, type, 1);
3018 	curseg->alloc_type = LFS;
3019 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3020 		curseg->fragment_remained_chunk =
3021 				get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3022 	return 0;
3023 }
3024 
3025 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
3026 					int segno, block_t start)
3027 {
3028 	struct seg_entry *se = get_seg_entry(sbi, segno);
3029 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
3030 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
3031 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
3032 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
3033 	int i;
3034 
3035 	for (i = 0; i < entries; i++)
3036 		target_map[i] = ckpt_map[i] | cur_map[i];
3037 
3038 	return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
3039 }
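
/*
 * Example of the bitmap union above (4-bit maps for brevity): with
 * cur_map = 1010 and ckpt_map = 0110, target_map = 1110, so the first free
 * SSR slot from offset 0 is bit 3, ensuring SSR never overwrites a block
 * that is live now or was still valid at the last checkpoint.
 */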
3040 
3041 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
3042 		struct curseg_info *seg)
3043 {
3044 	return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
3045 }
3046 
3047 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
3048 {
3049 	return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
3050 }
3051 
3052 /*
3053	 * This function always allocates a used segment (from the dirty seglist) in
3054	 * SSR manner, so it should recover the existing segment information of valid blocks.
3055  */
3056 static int change_curseg(struct f2fs_sb_info *sbi, int type)
3057 {
3058 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3059 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3060 	unsigned int new_segno = curseg->next_segno;
3061 	struct f2fs_summary_block *sum_node;
3062 	struct folio *sum_folio;
3063 
3064 	if (curseg->inited)
3065 		write_sum_page(sbi, curseg->sum_blk, curseg->segno);
3066 
3067 	__set_test_and_inuse(sbi, new_segno);
3068 
3069 	mutex_lock(&dirty_i->seglist_lock);
3070 	__remove_dirty_segment(sbi, new_segno, PRE);
3071 	__remove_dirty_segment(sbi, new_segno, DIRTY);
3072 	mutex_unlock(&dirty_i->seglist_lock);
3073 
3074 	reset_curseg(sbi, type, 1);
3075 	curseg->alloc_type = SSR;
3076 	curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
3077 
3078 	sum_folio = f2fs_get_sum_folio(sbi, new_segno);
3079 	if (IS_ERR(sum_folio)) {
3080			/* GC won't be able to use stale summary pages after a cp_error */
3081 		memset(curseg->sum_blk, 0, sbi->sum_entry_size);
3082 		return PTR_ERR(sum_folio);
3083 	}
3084 	sum_node = SUM_BLK_PAGE_ADDR(sbi, sum_folio, new_segno);
3085 	memcpy(curseg->sum_blk, sum_node, sbi->sum_entry_size);
3086 	f2fs_folio_put(sum_folio, true);
3087 	return 0;
3088 }
3089 
3090 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
3091 				int alloc_mode, unsigned long long age);
3092 
3093 static int get_atssr_segment(struct f2fs_sb_info *sbi, int type,
3094 					int target_type, int alloc_mode,
3095 					unsigned long long age)
3096 {
3097 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3098 	int ret = 0;
3099 
3100 	curseg->seg_type = target_type;
3101 
3102 	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
3103 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
3104 
3105 		curseg->seg_type = se->type;
3106 		ret = change_curseg(sbi, type);
3107 	} else {
3108 		/* allocate cold segment by default */
3109 		curseg->seg_type = CURSEG_COLD_DATA;
3110 		ret = new_curseg(sbi, type, true);
3111 	}
3112 	stat_inc_seg_type(sbi, curseg);
3113 	return ret;
3114 }
3115 
3116 static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi, bool force)
3117 {
3118 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
3119 	int ret = 0;
3120 
3121 	if (!sbi->am.atgc_enabled && !force)
3122 		return 0;
3123 
3124 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3125 
3126 	mutex_lock(&curseg->curseg_mutex);
3127 	down_write(&SIT_I(sbi)->sentry_lock);
3128 
3129 	ret = get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC,
3130 					CURSEG_COLD_DATA, SSR, 0);
3131 
3132 	up_write(&SIT_I(sbi)->sentry_lock);
3133 	mutex_unlock(&curseg->curseg_mutex);
3134 
3135 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3136 	return ret;
3137 }
3138 
3139 int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
3140 {
3141 	return __f2fs_init_atgc_curseg(sbi, false);
3142 }
3143 
3144 int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi)
3145 {
3146 	int ret;
3147 
3148 	if (!test_opt(sbi, ATGC))
3149 		return 0;
3150 	if (sbi->am.atgc_enabled)
3151 		return 0;
3152 	if (le64_to_cpu(F2FS_CKPT(sbi)->elapsed_time) <
3153 			sbi->am.age_threshold)
3154 		return 0;
3155 
3156 	ret = __f2fs_init_atgc_curseg(sbi, true);
3157 	if (!ret) {
3158 		sbi->am.atgc_enabled = true;
3159 		f2fs_info(sbi, "reenabled age threshold GC");
3160 	}
3161 	return ret;
3162 }
3163 
3164 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
3165 {
3166 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3167 
3168 	mutex_lock(&curseg->curseg_mutex);
3169 	if (!curseg->inited)
3170 		goto out;
3171 
3172 	if (get_valid_blocks(sbi, curseg->segno, false)) {
3173 		write_sum_page(sbi, curseg->sum_blk, curseg->segno);
3174 	} else {
3175 		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
3176 		__set_test_and_free(sbi, curseg->segno, true);
3177 		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
3178 	}
3179 out:
3180 	mutex_unlock(&curseg->curseg_mutex);
3181 }
3182 
3183 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
3184 {
3185 	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
3186 
3187 	if (sbi->am.atgc_enabled)
3188 		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
3189 }
3190 
3191 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
3192 {
3193 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3194 
3195 	mutex_lock(&curseg->curseg_mutex);
3196 	if (!curseg->inited)
3197 		goto out;
3198 	if (get_valid_blocks(sbi, curseg->segno, false))
3199 		goto out;
3200 
3201 	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
3202 	__set_test_and_inuse(sbi, curseg->segno);
3203 	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
3204 out:
3205 	mutex_unlock(&curseg->curseg_mutex);
3206 }
3207 
3208 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
3209 {
3210 	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
3211 
3212 	if (sbi->am.atgc_enabled)
3213 		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
3214 }
3215 
3216 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
3217 				int alloc_mode, unsigned long long age)
3218 {
3219 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3220 	unsigned segno = NULL_SEGNO;
3221 	unsigned short seg_type = curseg->seg_type;
3222 	int i, cnt;
3223 	bool reversed = false;
3224 
3225 	sanity_check_seg_type(sbi, seg_type);
3226 
3227		/* f2fs_need_SSR() has already forced us to do this */
3228 	if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type,
3229 				alloc_mode, age, false)) {
3230 		curseg->next_segno = segno;
3231 		return 1;
3232 	}
3233 
3234 	/* For node segments, let's do SSR more intensively */
3235 	if (IS_NODESEG(seg_type)) {
3236 		if (seg_type >= CURSEG_WARM_NODE) {
3237 			reversed = true;
3238 			i = CURSEG_COLD_NODE;
3239 		} else {
3240 			i = CURSEG_HOT_NODE;
3241 		}
3242 		cnt = NR_CURSEG_NODE_TYPE;
3243 	} else {
3244 		if (seg_type >= CURSEG_WARM_DATA) {
3245 			reversed = true;
3246 			i = CURSEG_COLD_DATA;
3247 		} else {
3248 			i = CURSEG_HOT_DATA;
3249 		}
3250 		cnt = NR_CURSEG_DATA_TYPE;
3251 	}
3252 
3253 	for (; cnt-- > 0; reversed ? i-- : i++) {
3254 		if (i == seg_type)
3255 			continue;
3256 		if (!f2fs_get_victim(sbi, &segno, BG_GC, i,
3257 					alloc_mode, age, false)) {
3258 			curseg->next_segno = segno;
3259 			return 1;
3260 		}
3261 	}
3262 
3263 	/* find valid_blocks=0 in dirty list */
3264 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
3265 		segno = get_free_segment(sbi);
3266 		if (segno != NULL_SEGNO) {
3267 			curseg->next_segno = segno;
3268 			return 1;
3269 		}
3270 	}
3271 	return 0;
3272 }
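
/*
 * Victim-order example for the function above: an SSR request for
 * CURSEG_WARM_NODE tries WARM_NODE itself first, then walks the reversed
 * list COLD_NODE, HOT_NODE; a CURSEG_HOT_DATA request tries HOT_DATA, then
 * WARM_DATA, then COLD_DATA.  As a last resort with checkpoints disabled,
 * any fully invalidated segment from the dirty list is taken.
 */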
3273 
3274 static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
3275 {
3276 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3277 
3278 	if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
3279 	    curseg->seg_type == CURSEG_WARM_NODE)
3280 		return true;
3281 	if (curseg->alloc_type == LFS && is_next_segment_free(sbi, curseg) &&
3282 	    likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3283 		return true;
3284 	if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
3285 		return true;
3286 	return false;
3287 }
3288 
3289 int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3290 					unsigned int start, unsigned int end)
3291 {
3292 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3293 	unsigned int segno;
3294 	int ret = 0;
3295 
3296 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3297 	mutex_lock(&curseg->curseg_mutex);
3298 	down_write(&SIT_I(sbi)->sentry_lock);
3299 
3300 	segno = CURSEG_I(sbi, type)->segno;
3301 	if (segno < start || segno > end)
3302 		goto unlock;
3303 
3304 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
3305 		ret = change_curseg(sbi, type);
3306 	else
3307 		ret = new_curseg(sbi, type, true);
3308 
3309 	stat_inc_seg_type(sbi, curseg);
3310 
3311 	locate_dirty_segment(sbi, segno);
3312 unlock:
3313 	up_write(&SIT_I(sbi)->sentry_lock);
3314 
3315 	if (segno != curseg->segno)
3316 		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3317 			    type, segno, curseg->segno);
3318 
3319 	mutex_unlock(&curseg->curseg_mutex);
3320 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3321 	return ret;
3322 }
3323 
3324 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3325 						bool new_sec, bool force)
3326 {
3327 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3328 	unsigned int old_segno;
3329 	int err = 0;
3330 
3331 	if (type == CURSEG_COLD_DATA_PINNED && !curseg->inited)
3332 		goto allocate;
3333 
3334 	if (!force && curseg->inited &&
3335 	    !curseg->next_blkoff &&
3336 	    !get_valid_blocks(sbi, curseg->segno, new_sec) &&
3337 	    !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3338 		return 0;
3339 
3340 allocate:
3341 	old_segno = curseg->segno;
3342 	err = new_curseg(sbi, type, true);
3343 	if (err)
3344 		return err;
3345 	stat_inc_seg_type(sbi, curseg);
3346 	locate_dirty_segment(sbi, old_segno);
3347 	return 0;
3348 }
3349 
3350 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3351 {
3352 	int ret;
3353 
3354 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3355 	down_write(&SIT_I(sbi)->sentry_lock);
3356 	ret = __allocate_new_segment(sbi, type, true, force);
3357 	up_write(&SIT_I(sbi)->sentry_lock);
3358 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3359 
3360 	return ret;
3361 }
3362 
3363 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
3364 {
3365 	struct f2fs_lock_context lc;
3366 	int err;
3367 	bool gc_required = true;
3368 
3369 retry:
3370 	f2fs_lock_op(sbi, &lc);
3371 	err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3372 	f2fs_unlock_op(sbi, &lc);
3373 
3374 	if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
3375 		f2fs_down_write_trace(&sbi->gc_lock, &lc);
3376 		err = f2fs_gc_range(sbi, 0, sbi->first_seq_zone_segno - 1,
3377 				true, ZONED_PIN_SEC_REQUIRED_COUNT);
3378 		f2fs_up_write_trace(&sbi->gc_lock, &lc);
3379 
3380 		gc_required = false;
3381 		if (!err)
3382 			goto retry;
3383 	}
3384 
3385 	return err;
3386 }
3387 
3388 int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3389 {
3390 	int i;
3391 	int err = 0;
3392 
3393 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3394 	down_write(&SIT_I(sbi)->sentry_lock);
3395 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
3396 		err += __allocate_new_segment(sbi, i, false, false);
3397 	up_write(&SIT_I(sbi)->sentry_lock);
3398 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3399 
3400 	return err;
3401 }
3402 
3403 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3404 						struct cp_control *cpc)
3405 {
3406 	__u64 trim_start = cpc->trim_start;
3407 	bool has_candidate = false;
3408 
3409 	down_write(&SIT_I(sbi)->sentry_lock);
3410 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
3411 		if (add_discard_addrs(sbi, cpc, true)) {
3412 			has_candidate = true;
3413 			break;
3414 		}
3415 	}
3416 	up_write(&SIT_I(sbi)->sentry_lock);
3417 
3418 	cpc->trim_start = trim_start;
3419 	return has_candidate;
3420 }
3421 
3422 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3423 					struct discard_policy *dpolicy,
3424 					unsigned int start, unsigned int end)
3425 {
3426 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3427 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
3428 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
3429 	struct discard_cmd *dc;
3430 	struct blk_plug plug;
3431 	int issued;
3432 	unsigned int trimmed = 0;
3433 
3434 next:
3435 	issued = 0;
3436 
3437 	mutex_lock(&dcc->cmd_lock);
3438 	if (unlikely(dcc->rbtree_check))
3439 		f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
3440 
3441 	dc = __lookup_discard_cmd_ret(&dcc->root, start,
3442 				&prev_dc, &next_dc, &insert_p, &insert_parent);
3443 	if (!dc)
3444 		dc = next_dc;
3445 
3446 	blk_start_plug(&plug);
3447 
3448 	while (dc && dc->di.lstart <= end) {
3449 		struct rb_node *node;
3450 		int err = 0;
3451 
3452 		if (dc->di.len < dpolicy->granularity)
3453 			goto skip;
3454 
3455 		if (dc->state != D_PREP) {
3456 			list_move_tail(&dc->list, &dcc->fstrim_list);
3457 			goto skip;
3458 		}
3459 
3460 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3461 
3462 		if (issued >= dpolicy->max_requests) {
3463 			start = dc->di.lstart + dc->di.len;
3464 
3465 			if (err)
3466 				__remove_discard_cmd(sbi, dc);
3467 
3468 			blk_finish_plug(&plug);
3469 			mutex_unlock(&dcc->cmd_lock);
3470 			trimmed += __wait_all_discard_cmd(sbi, NULL);
3471 			f2fs_schedule_timeout(DEFAULT_DISCARD_INTERVAL);
3472 			goto next;
3473 		}
3474 skip:
3475 		node = rb_next(&dc->rb_node);
3476 		if (err)
3477 			__remove_discard_cmd(sbi, dc);
3478 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3479 
3480 		if (fatal_signal_pending(current))
3481 			break;
3482 	}
3483 
3484 	blk_finish_plug(&plug);
3485 	mutex_unlock(&dcc->cmd_lock);
3486 
3487 	return trimmed;
3488 }
3489 
3490 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3491 {
3492 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
3493 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3494 	unsigned int start_segno, end_segno;
3495 	block_t start_block, end_block;
3496 	struct cp_control cpc;
3497 	struct discard_policy dpolicy;
3498 	struct f2fs_lock_context lc;
3499 	unsigned long long trimmed = 0;
3500 	int err = 0;
3501 	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3502 
3503 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3504 		return -EINVAL;
3505 
3506 	if (end < MAIN_BLKADDR(sbi))
3507 		goto out;
3508 
3509 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3510 		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3511 		return -EFSCORRUPTED;
3512 	}
3513 
3514 	/* start/end segment number in main_area */
3515 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3516 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3517 						GET_SEGNO(sbi, end);
3518 	if (need_align) {
3519 		start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
3520 		end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
3521 	}
3522 
3523 	cpc.reason = CP_DISCARD;
3524 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3525 	cpc.trim_start = start_segno;
3526 	cpc.trim_end = end_segno;
3527 
3528 	if (sbi->discard_blks == 0)
3529 		goto out;
3530 
3531 	f2fs_down_write_trace(&sbi->gc_lock, &lc);
3532 	stat_inc_cp_call_count(sbi, TOTAL_CALL);
3533 	err = f2fs_write_checkpoint(sbi, &cpc);
3534 	f2fs_up_write_trace(&sbi->gc_lock, &lc);
3535 	if (err)
3536 		goto out;
3537 
3538 	/*
3539	 * We queued discard candidates, but we don't actually need to wait for
3540	 * all of them, since they'll be issued at idle time by the runtime
3541	 * discard option.  The user configuration appears to rely on runtime
3542	 * discard or periodic fstrim rather than waiting here.
3543 	 */
3544 	if (f2fs_realtime_discard_enable(sbi))
3545 		goto out;
3546 
3547 	start_block = START_BLOCK(sbi, start_segno);
3548 	end_block = START_BLOCK(sbi, end_segno + 1);
3549 
3550 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3551 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3552 					start_block, end_block);
3553 
3554 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3555 					start_block, end_block);
3556 out:
3557 	if (!err)
3558 		range->len = F2FS_BLK_TO_BYTES(trimmed);
3559 	return err;
3560 }
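
/*
 * f2fs_trim_fs() is reached through the FITRIM ioctl.  A minimal userspace
 * sketch (error handling elided; "/mnt/f2fs" is a placeholder path):
 *
 *	#include <fcntl.h>
 *	#include <linux/fs.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	int trim_whole_fs(void)
 *	{
 *		struct fstrim_range range = {
 *			.start = 0,
 *			.len = UINT64_MAX,	// whole filesystem
 *			.minlen = 0,		// rounded up to at least one block
 *		};
 *		int fd = open("/mnt/f2fs", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, FITRIM, &range) < 0)
 *			return -1;
 *		// on success, range.len reports the number of bytes trimmed
 *		return 0;
 *	}
 */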
3561 
3562 int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint)
3563 {
3564 	if (F2FS_OPTION(sbi).active_logs == 2)
3565 		return CURSEG_HOT_DATA;
3566 	else if (F2FS_OPTION(sbi).active_logs == 4)
3567 		return CURSEG_COLD_DATA;
3568 
3569		/* active_logs == 6 */
3570 	switch (hint) {
3571 	case WRITE_LIFE_SHORT:
3572 		return CURSEG_HOT_DATA;
3573 	case WRITE_LIFE_EXTREME:
3574 		return CURSEG_COLD_DATA;
3575 	default:
3576 		return CURSEG_WARM_DATA;
3577 	}
3578 }
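
/*
 * The hint consumed above can be set per-file from userspace.  A minimal
 * sketch (assumes a libc that exposes F_SET_RW_HINT and the RWH_* life
 * hints with _GNU_SOURCE; otherwise they come from <linux/fcntl.h>):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *
 *	// Steer a file's data to the hot log when active_logs == 6.
 *	static int mark_short_lived(int fd)
 *	{
 *		uint64_t hint = RWH_WRITE_LIFE_SHORT;
 *
 *		return fcntl(fd, F_SET_RW_HINT, &hint);
 *	}
 */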
3579 
3580 /*
3581	 * This returns a write hint for each segment type.  These hints will be
3582	 * passed down to the block layer as below by default.
3583  *
3584  * User                  F2FS                     Block
3585  * ----                  ----                     -----
3586  *                       META                     WRITE_LIFE_NONE|REQ_META
3587  *                       HOT_NODE                 WRITE_LIFE_NONE
3588  *                       WARM_NODE                WRITE_LIFE_MEDIUM
3589  *                       COLD_NODE                WRITE_LIFE_LONG
3590  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
3591  * extension list        "                        "
3592  *
3593  * -- buffered io
3594  *                       COLD_DATA                WRITE_LIFE_EXTREME
3595  *                       HOT_DATA                 WRITE_LIFE_SHORT
3596  *                       WARM_DATA                WRITE_LIFE_NOT_SET
3597  *
3598  * -- direct io
3599  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3600  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3601  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3602  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
3603  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
3604  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
3605  */
3606 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3607 				enum page_type type, enum temp_type temp)
3608 {
3609 	switch (type) {
3610 	case DATA:
3611 		switch (temp) {
3612 		case WARM:
3613 			return WRITE_LIFE_NOT_SET;
3614 		case HOT:
3615 			return WRITE_LIFE_SHORT;
3616 		case COLD:
3617 			return WRITE_LIFE_EXTREME;
3618 		default:
3619 			return WRITE_LIFE_NONE;
3620 		}
3621 	case NODE:
3622 		switch (temp) {
3623 		case WARM:
3624 			return WRITE_LIFE_MEDIUM;
3625 		case HOT:
3626 			return WRITE_LIFE_NONE;
3627 		case COLD:
3628 			return WRITE_LIFE_LONG;
3629 		default:
3630 			return WRITE_LIFE_NONE;
3631 		}
3632 	case META:
3633 		return WRITE_LIFE_NONE;
3634 	default:
3635 		return WRITE_LIFE_NONE;
3636 	}
3637 }
3638 
3639 static int __get_segment_type_2(struct f2fs_io_info *fio)
3640 {
3641 	if (fio->type == DATA)
3642 		return CURSEG_HOT_DATA;
3643 	else
3644 		return CURSEG_HOT_NODE;
3645 }
3646 
3647 static int __get_segment_type_4(struct f2fs_io_info *fio)
3648 {
3649 	if (fio->type == DATA) {
3650 		struct inode *inode = fio_inode(fio);
3651 
3652 		if (S_ISDIR(inode->i_mode))
3653 			return CURSEG_HOT_DATA;
3654 		else
3655 			return CURSEG_COLD_DATA;
3656 	} else {
3657 		if (IS_DNODE(fio->folio) && is_cold_node(fio->folio))
3658 			return CURSEG_WARM_NODE;
3659 		else
3660 			return CURSEG_COLD_NODE;
3661 	}
3662 }
3663 
3664 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
3665 {
3666 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3667 	struct extent_info ei = {};
3668 
3669 	if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
3670 		if (!ei.age)
3671 			return NO_CHECK_TYPE;
3672 		if (ei.age <= sbi->hot_data_age_threshold)
3673 			return CURSEG_HOT_DATA;
3674 		if (ei.age <= sbi->warm_data_age_threshold)
3675 			return CURSEG_WARM_DATA;
3676 		return CURSEG_COLD_DATA;
3677 	}
3678 	return NO_CHECK_TYPE;
3679 }
3680 
3681 static int __get_segment_type_6(struct f2fs_io_info *fio)
3682 {
3683 	if (fio->type == DATA) {
3684 		struct inode *inode = fio_inode(fio);
3685 		int type;
3686 
3687 		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3688 			return CURSEG_COLD_DATA_PINNED;
3689 
3690 		if (page_private_gcing(fio->page)) {
3691 			if (fio->sbi->am.atgc_enabled &&
3692 				(fio->io_type == FS_DATA_IO) &&
3693 				(fio->sbi->gc_mode != GC_URGENT_HIGH) &&
3694 				__is_valid_data_blkaddr(fio->old_blkaddr) &&
3695 				!is_inode_flag_set(inode, FI_OPU_WRITE))
3696 				return CURSEG_ALL_DATA_ATGC;
3697 			else
3698 				return CURSEG_COLD_DATA;
3699 		}
3700 		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3701 			return CURSEG_COLD_DATA;
3702 
3703 		type = __get_age_segment_type(inode, fio->folio->index);
3704 		if (type != NO_CHECK_TYPE)
3705 			return type;
3706 
3707 		if (file_is_hot(inode) ||
3708 				is_inode_flag_set(inode, FI_HOT_DATA) ||
3709 				f2fs_is_cow_file(inode) ||
3710 				is_inode_flag_set(inode, FI_NEED_IPU))
3711 			return CURSEG_HOT_DATA;
3712 		return f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
3713 						inode->i_write_hint);
3714 	} else {
3715 		if (IS_DNODE(fio->folio))
3716 			return is_cold_node(fio->folio) ? CURSEG_WARM_NODE :
3717 						CURSEG_HOT_NODE;
3718 		return CURSEG_COLD_NODE;
3719 	}
3720 }
3721 
3722 enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi,
3723 						enum log_type type)
3724 {
3725 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3726 	enum temp_type temp = COLD;
3727 
3728 	switch (curseg->seg_type) {
3729 	case CURSEG_HOT_NODE:
3730 	case CURSEG_HOT_DATA:
3731 		temp = HOT;
3732 		break;
3733 	case CURSEG_WARM_NODE:
3734 	case CURSEG_WARM_DATA:
3735 		temp = WARM;
3736 		break;
3737 	case CURSEG_COLD_NODE:
3738 	case CURSEG_COLD_DATA:
3739 		temp = COLD;
3740 		break;
3741 	default:
3742 		f2fs_bug_on(sbi, 1);
3743 	}
3744 
3745 	return temp;
3746 }
3747 
3748 static int __get_segment_type(struct f2fs_io_info *fio)
3749 {
3750 	enum log_type type = CURSEG_HOT_DATA;
3751 
3752 	switch (F2FS_OPTION(fio->sbi).active_logs) {
3753 	case 2:
3754 		type = __get_segment_type_2(fio);
3755 		break;
3756 	case 4:
3757 		type = __get_segment_type_4(fio);
3758 		break;
3759 	case 6:
3760 		type = __get_segment_type_6(fio);
3761 		break;
3762 	default:
3763 		f2fs_bug_on(fio->sbi, true);
3764 	}
3765 
3766 	fio->temp = f2fs_get_segment_temp(fio->sbi, type);
3767 
3768 	return type;
3769 }
3770 
3771 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
3772 		struct curseg_info *seg)
3773 {
3774 	/* To allocate block chunks in different sizes, use a random number */
3775 	if (--seg->fragment_remained_chunk > 0)
3776 		return;
3777 
3778 	seg->fragment_remained_chunk =
3779 		get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3780 	seg->next_blkoff +=
3781 		get_random_u32_inclusive(1, sbi->max_fragment_hole);
3782 }
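
/*
 * Sketch of the effect in FS_MODE_FRAGMENT_BLK mode: with
 * max_fragment_chunk == 4 and max_fragment_hole == 4, each chunk
 * length and each hole length is drawn uniformly from [1, 4], so an
 * allocation stream may look like
 *
 *	3 blocks, 2-block hole, 1 block, 4-block hole, ...
 *
 * deliberately fragmenting the on-disk layout for testing.
 */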
3783 
3784 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct folio *folio,
3785 		block_t old_blkaddr, block_t *new_blkaddr,
3786 		struct f2fs_summary *sum, int type,
3787 		struct f2fs_io_info *fio)
3788 {
3789 	struct sit_info *sit_i = SIT_I(sbi);
3790 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3791 	unsigned long long old_mtime;
3792 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3793 	struct seg_entry *se = NULL;
3794 	bool segment_full = false;
3795 	int ret = 0;
3796 
3797 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3798 
3799 	mutex_lock(&curseg->curseg_mutex);
3800 	down_write(&sit_i->sentry_lock);
3801 
3802 	if (curseg->segno == NULL_SEGNO) {
3803 		ret = -ENOSPC;
3804 		goto out_err;
3805 	}
3806 
3807 	if (from_gc) {
3808 		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3809 		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3810 		sanity_check_seg_type(sbi, se->type);
3811 		f2fs_bug_on(sbi, IS_NODESEG(se->type));
3812 	}
3813 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3814 
3815 	f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
3816 
3817 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3818 
3819 	sum_entries(curseg->sum_blk)[curseg->next_blkoff] = *sum;
3820 	if (curseg->alloc_type == SSR) {
3821 		curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
3822 	} else {
3823 		curseg->next_blkoff++;
3824 		if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3825 			f2fs_randomize_chunk(sbi, curseg);
3826 	}
3827 	if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
3828 		segment_full = true;
3829 	stat_inc_block_count(sbi, curseg);
3830 
3831 	if (from_gc) {
3832 		old_mtime = get_segment_mtime(sbi, old_blkaddr);
3833 	} else {
3834 		update_segment_mtime(sbi, old_blkaddr, 0);
3835 		old_mtime = 0;
3836 	}
3837 	update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3838 
3839 	/*
3840 	 * SIT information should be updated before segment allocation,
3841 	 * since SSR needs the latest valid block information.
3842 	 */
3843 	update_sit_entry(sbi, *new_blkaddr, 1);
3844 	update_sit_entry(sbi, old_blkaddr, -1);
3845 
3846 	/*
3847 	 * If the current segment is full, flush it out and replace it with a
3848 	 * new segment.
3849 	 */
3850 	if (segment_full) {
3851 		if (type == CURSEG_COLD_DATA_PINNED &&
3852 		    !((curseg->segno + 1) % sbi->segs_per_sec)) {
3853 			write_sum_page(sbi, curseg->sum_blk, curseg->segno);
3854 			reset_curseg_fields(curseg);
3855 			goto skip_new_segment;
3856 		}
3857 
3858 		if (from_gc) {
3859 			ret = get_atssr_segment(sbi, type, se->type,
3860 						AT_SSR, se->mtime);
3861 		} else {
3862 			if (need_new_seg(sbi, type))
3863 				ret = new_curseg(sbi, type, false);
3864 			else
3865 				ret = change_curseg(sbi, type);
3866 			stat_inc_seg_type(sbi, curseg);
3867 		}
3868 
3869 		if (ret)
3870 			goto out_err;
3871 	}
3872 
3873 skip_new_segment:
3874 	/*
3875 	 * Segment dirty status should be updated after segment allocation,
3876 	 * so we only need to update the status once, after the previous
3877 	 * segment has been closed.
3878 	 */
3879 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3880 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3881 
3882 	if (IS_DATASEG(curseg->seg_type)) {
3883 		unsigned long long new_val;
3884 
3885 		new_val = atomic64_inc_return(&sbi->allocated_data_blocks);
3886 		if (unlikely(new_val == ULLONG_MAX))
3887 			atomic64_set(&sbi->allocated_data_blocks, 0);
3888 	}
3889 
3890 	up_write(&sit_i->sentry_lock);
3891 
3892 	if (folio && IS_NODESEG(curseg->seg_type)) {
3893 		fill_node_footer_blkaddr(folio, NEXT_FREE_BLKADDR(sbi, curseg));
3894 
3895 		f2fs_inode_chksum_set(sbi, folio);
3896 	}
3897 
3898 	if (fio) {
3899 		struct f2fs_bio_info *io;
3900 
3901 		INIT_LIST_HEAD(&fio->list);
3902 		fio->in_list = 1;
3903 		io = sbi->write_io[fio->type] + fio->temp;
3904 		spin_lock(&io->io_lock);
3905 		list_add_tail(&fio->list, &io->io_list);
3906 		spin_unlock(&io->io_lock);
3907 	}
3908 
3909 	mutex_unlock(&curseg->curseg_mutex);
3910 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3911 	return 0;
3912 
3913 out_err:
3914 	*new_blkaddr = NULL_ADDR;
3915 	up_write(&sit_i->sentry_lock);
3916 	mutex_unlock(&curseg->curseg_mutex);
3917 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3918 	return ret;
3919 }
3920 
3921 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3922 					block_t blkaddr, unsigned int blkcnt)
3923 {
3924 	if (!f2fs_is_multi_device(sbi))
3925 		return;
3926 
3927 	while (1) {
3928 		unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3929 		unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3930 
3931 		/* update device state for fsync */
3932 		f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3933 
3934 		/* update device state for checkpoint */
3935 		if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3936 			spin_lock(&sbi->dev_lock);
3937 			f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3938 			spin_unlock(&sbi->dev_lock);
3939 		}
3940 
3941 		if (blkcnt <= blks)
3942 			break;
3943 		blkcnt -= blks;
3944 		blkaddr += blks;
3945 	}
3946 }
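
/*
 * Example (hypothetical two-device layout): if FDEV(0) covers blocks
 * [0, 99] and FDEV(1) covers [100, 199], a call with blkaddr == 90 and
 * blkcnt == 20 dirties device 0 on behalf of the first 10 blocks, then
 * advances blkaddr to 100 and dirties device 1 for the remaining 10.
 */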
3947 
3948 static int log_type_to_seg_type(enum log_type type)
3949 {
3950 	int seg_type = CURSEG_COLD_DATA;
3951 
3952 	switch (type) {
3953 	case CURSEG_HOT_DATA:
3954 	case CURSEG_WARM_DATA:
3955 	case CURSEG_COLD_DATA:
3956 	case CURSEG_HOT_NODE:
3957 	case CURSEG_WARM_NODE:
3958 	case CURSEG_COLD_NODE:
3959 		seg_type = (int)type;
3960 		break;
3961 	case CURSEG_COLD_DATA_PINNED:
3962 	case CURSEG_ALL_DATA_ATGC:
3963 		seg_type = CURSEG_COLD_DATA;
3964 		break;
3965 	default:
3966 		break;
3967 	}
3968 	return seg_type;
3969 }
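
/*
 * Note: the non-persistent logs (CURSEG_COLD_DATA_PINNED and
 * CURSEG_ALL_DATA_ATGC) keep no on-disk summary type of their own;
 * they allocate from segments typed as cold data, which is why both
 * collapse to CURSEG_COLD_DATA here.
 */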
3970 
3971 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3972 {
3973 	struct folio *folio = fio->folio;
3974 	enum log_type type = __get_segment_type(fio);
3975 	int seg_type = log_type_to_seg_type(type);
3976 	bool keep_order = (f2fs_lfs_mode(fio->sbi) &&
3977 				seg_type == CURSEG_COLD_DATA);
3978 	int err;
3979 
3980 	if (keep_order)
3981 		f2fs_down_read(&fio->sbi->io_order_lock);
3982 
3983 	err = f2fs_allocate_data_block(fio->sbi, folio, fio->old_blkaddr,
3984 			&fio->new_blkaddr, sum, type, fio);
3985 	if (unlikely(err)) {
3986 		f2fs_err_ratelimited(fio->sbi,
3987 			"%s Failed to allocate data block, ino:%u, index:%lu, type:%d, old_blkaddr:0x%x, new_blkaddr:0x%x, err:%d",
3988 			__func__, fio->ino, folio->index, type,
3989 			fio->old_blkaddr, fio->new_blkaddr, err);
3990 		if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host))
3991 			fscrypt_finalize_bounce_page(&fio->encrypted_page);
3992 		folio_end_writeback(folio);
3993 		if (f2fs_in_warm_node_list(folio))
3994 			f2fs_del_fsync_node_entry(fio->sbi, folio);
3995 		f2fs_bug_on(fio->sbi, !is_set_ckpt_flags(fio->sbi,
3996 							CP_ERROR_FLAG));
3997 		goto out;
3998 	}
3999 
4000 	f2fs_bug_on(fio->sbi, !f2fs_is_valid_blkaddr_raw(fio->sbi,
4001 				fio->new_blkaddr, DATA_GENERIC_ENHANCE));
4002 
4003 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
4004 		f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1);
4005 
4006 	/* write out the dirty page to the bdev */
4007 	f2fs_submit_page_write(fio);
4008 
4009 	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
4010 out:
4011 	if (keep_order)
4012 		f2fs_up_read(&fio->sbi->io_order_lock);
4013 }
4014 
4015 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
4016 					enum iostat_type io_type)
4017 {
4018 	struct f2fs_io_info fio = {
4019 		.sbi = sbi,
4020 		.type = META,
4021 		.temp = HOT,
4022 		.op = REQ_OP_WRITE,
4023 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
4024 		.old_blkaddr = folio->index,
4025 		.new_blkaddr = folio->index,
4026 		.folio = folio,
4027 		.encrypted_page = NULL,
4028 		.in_list = 0,
4029 	};
4030 
4031 	if (unlikely(folio->index >= MAIN_BLKADDR(sbi)))
4032 		fio.op_flags &= ~REQ_META;
4033 
4034 	folio_start_writeback(folio);
4035 	f2fs_submit_page_write(&fio);
4036 
4037 	stat_inc_meta_count(sbi, folio->index);
4038 	f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
4039 }
4040 
4041 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
4042 {
4043 	struct f2fs_summary sum;
4044 
4045 	set_summary(&sum, nid, 0, 0);
4046 	do_write_page(&sum, fio);
4047 
4048 	f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
4049 }
4050 
4051 void f2fs_outplace_write_data(struct dnode_of_data *dn,
4052 					struct f2fs_io_info *fio)
4053 {
4054 	struct f2fs_sb_info *sbi = fio->sbi;
4055 	struct f2fs_summary sum;
4056 
4057 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
4058 	if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO)
4059 		f2fs_update_age_extent_cache(dn);
4060 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
4061 	do_write_page(&sum, fio);
4062 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
4063 
4064 	f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
4065 }
4066 
4067 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
4068 {
4069 	int err;
4070 	struct f2fs_sb_info *sbi = fio->sbi;
4071 	unsigned int segno;
4072 
4073 	fio->new_blkaddr = fio->old_blkaddr;
4074 	/* i/o temperature is needed for passing down write hints */
4075 	__get_segment_type(fio);
4076 
4077 	segno = GET_SEGNO(sbi, fio->new_blkaddr);
4078 
4079 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
4080 		set_sbi_flag(sbi, SBI_NEED_FSCK);
4081 		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
4082 			  __func__, segno);
4083 		err = -EFSCORRUPTED;
4084 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
4085 		goto drop_bio;
4086 	}
4087 
4088 	if (f2fs_cp_error(sbi)) {
4089 		err = -EIO;
4090 		goto drop_bio;
4091 	}
4092 
4093 	if (fio->meta_gc)
4094 		f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
4095 
4096 	stat_inc_inplace_blocks(fio->sbi);
4097 
4098 	if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
4099 		err = f2fs_merge_page_bio(fio);
4100 	else
4101 		err = f2fs_submit_page_bio(fio);
4102 	if (!err) {
4103 		f2fs_update_device_state(fio->sbi, fio->ino,
4104 						fio->new_blkaddr, 1);
4105 		f2fs_update_iostat(fio->sbi, fio_inode(fio),
4106 						fio->io_type, F2FS_BLKSIZE);
4107 	}
4108 
4109 	return err;
4110 drop_bio:
4111 	if (fio->bio && *(fio->bio)) {
4112 		struct bio *bio = *(fio->bio);
4113 
4114 		bio->bi_status = BLK_STS_IOERR;
4115 		bio_endio(bio);
4116 		*(fio->bio) = NULL;
4117 	}
4118 	return err;
4119 }
4120 
4121 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
4122 						unsigned int segno)
4123 {
4124 	int i;
4125 
4126 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
4127 		if (CURSEG_I(sbi, i)->segno == segno)
4128 			break;
4129 	}
4130 	return i;
4131 }
4132 
4133 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
4134 				block_t old_blkaddr, block_t new_blkaddr,
4135 				bool recover_curseg, bool recover_newaddr,
4136 				bool from_gc)
4137 {
4138 	struct sit_info *sit_i = SIT_I(sbi);
4139 	struct curseg_info *curseg;
4140 	unsigned int segno, old_cursegno;
4141 	struct seg_entry *se;
4142 	int type;
4143 	unsigned short old_blkoff;
4144 	unsigned char old_alloc_type;
4145 
4146 	segno = GET_SEGNO(sbi, new_blkaddr);
4147 	se = get_seg_entry(sbi, segno);
4148 	type = se->type;
4149 
4150 	f2fs_down_write(&SM_I(sbi)->curseg_lock);
4151 
4152 	if (!recover_curseg) {
4153 		/* for recovery flow */
4154 		if (se->valid_blocks == 0 && !is_curseg(sbi, segno)) {
4155 			if (old_blkaddr == NULL_ADDR)
4156 				type = CURSEG_COLD_DATA;
4157 			else
4158 				type = CURSEG_WARM_DATA;
4159 		}
4160 	} else {
4161 		if (is_curseg(sbi, segno)) {
4162 			/* se->type is volatile due to SSR allocation */
4163 			type = __f2fs_get_curseg(sbi, segno);
4164 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
4165 		} else {
4166 			type = CURSEG_WARM_DATA;
4167 		}
4168 	}
4169 
4170 	curseg = CURSEG_I(sbi, type);
4171 	f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type));
4172 
4173 	mutex_lock(&curseg->curseg_mutex);
4174 	down_write(&sit_i->sentry_lock);
4175 
4176 	old_cursegno = curseg->segno;
4177 	old_blkoff = curseg->next_blkoff;
4178 	old_alloc_type = curseg->alloc_type;
4179 
4180 	/* change the current segment */
4181 	if (segno != curseg->segno) {
4182 		curseg->next_segno = segno;
4183 		if (change_curseg(sbi, type))
4184 			goto out_unlock;
4185 	}
4186 
4187 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
4188 	sum_entries(curseg->sum_blk)[curseg->next_blkoff] = *sum;
4189 
4190 	if (!recover_curseg || recover_newaddr) {
4191 		if (!from_gc)
4192 			update_segment_mtime(sbi, new_blkaddr, 0);
4193 		update_sit_entry(sbi, new_blkaddr, 1);
4194 	}
4195 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
4196 		f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
4197 		if (!from_gc)
4198 			update_segment_mtime(sbi, old_blkaddr, 0);
4199 		update_sit_entry(sbi, old_blkaddr, -1);
4200 	}
4201 
4202 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
4203 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
4204 
4205 	locate_dirty_segment(sbi, old_cursegno);
4206 
4207 	if (recover_curseg) {
4208 		if (old_cursegno != curseg->segno) {
4209 			curseg->next_segno = old_cursegno;
4210 			if (change_curseg(sbi, type))
4211 				goto out_unlock;
4212 		}
4213 		curseg->next_blkoff = old_blkoff;
4214 		curseg->alloc_type = old_alloc_type;
4215 	}
4216 
4217 out_unlock:
4218 	up_write(&sit_i->sentry_lock);
4219 	mutex_unlock(&curseg->curseg_mutex);
4220 	f2fs_up_write(&SM_I(sbi)->curseg_lock);
4221 }
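
/*
 * recover_curseg semantics (sketch): when set, the function above
 * temporarily redirects the matching data curseg to the segment that
 * owns new_blkaddr, records the summary entry, and then restores the
 * saved segno/next_blkoff/alloc_type, so roll-forward recovery does
 * not disturb the position of ongoing allocation.
 */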
4222 
4223 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
4224 				block_t old_addr, block_t new_addr,
4225 				unsigned char version, bool recover_curseg,
4226 				bool recover_newaddr)
4227 {
4228 	struct f2fs_summary sum;
4229 
4230 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
4231 
4232 	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
4233 					recover_curseg, recover_newaddr, false);
4234 
4235 	f2fs_update_data_blkaddr(dn, new_addr);
4236 }
4237 
4238 void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
4239 		bool ordered, bool locked)
4240 {
4241 	if (folio_test_writeback(folio)) {
4242 		struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
4243 
4244 		/* submit cached LFS IO */
4245 		f2fs_submit_merged_write_folio(sbi, folio, type);
4246 		/* submit cached IPU IO */
4247 		f2fs_submit_merged_ipu_write(sbi, NULL, folio);
4248 		if (ordered) {
4249 			folio_wait_writeback(folio);
4250 			f2fs_bug_on(sbi, locked && folio_test_writeback(folio));
4251 		} else {
4252 			folio_wait_stable(folio);
4253 		}
4254 	}
4255 }
4256 
4257 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
4258 {
4259 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4260 	struct folio *cfolio;
4261 
4262 	if (!f2fs_meta_inode_gc_required(inode))
4263 		return;
4264 
4265 	if (!__is_valid_data_blkaddr(blkaddr))
4266 		return;
4267 
4268 	cfolio = filemap_lock_folio(META_MAPPING(sbi), blkaddr);
4269 	if (!IS_ERR(cfolio)) {
4270 		f2fs_folio_wait_writeback(cfolio, DATA, true, true);
4271 		f2fs_folio_put(cfolio, true);
4272 	}
4273 }
4274 
4275 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
4276 								block_t len)
4277 {
4278 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4279 	block_t i;
4280 
4281 	if (!f2fs_meta_inode_gc_required(inode))
4282 		return;
4283 
4284 	for (i = 0; i < len; i++)
4285 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
4286 
4287 	f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
4288 }
4289 
4290 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
4291 {
4292 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4293 	struct curseg_info *seg_i;
4294 	unsigned char *kaddr;
4295 	struct folio *folio;
4296 	block_t start;
4297 	int i, j, offset;
4298 
4299 	start = start_sum_block(sbi);
4300 
4301 	folio = f2fs_get_meta_folio(sbi, start++);
4302 	if (IS_ERR(folio))
4303 		return PTR_ERR(folio);
4304 	kaddr = folio_address(folio);
4305 
4306 	/* Step 1: restore nat cache */
4307 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4308 	memcpy(seg_i->journal, kaddr, sbi->sum_journal_size);
4309 
4310 	/* Step 2: restore sit cache */
4311 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4312 	memcpy(seg_i->journal, kaddr + sbi->sum_journal_size, sbi->sum_journal_size);
4313 	offset = 2 * sbi->sum_journal_size;
4314 
4315 	/* Step 3: restore summary entries */
4316 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4317 		unsigned short blk_off;
4318 		unsigned int segno;
4319 
4320 		seg_i = CURSEG_I(sbi, i);
4321 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
4322 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
4323 		seg_i->next_segno = segno;
4324 		reset_curseg(sbi, i, 0);
4325 		seg_i->alloc_type = ckpt->alloc_type[i];
4326 		seg_i->next_blkoff = blk_off;
4327 
4328 		if (seg_i->alloc_type == SSR)
4329 			blk_off = BLKS_PER_SEG(sbi);
4330 
4331 		for (j = 0; j < blk_off; j++) {
4332 			struct f2fs_summary *s;
4333 
4334 			s = (struct f2fs_summary *)(kaddr + offset);
4335 			sum_entries(seg_i->sum_blk)[j] = *s;
4336 			offset += SUMMARY_SIZE;
4337 			if (offset + SUMMARY_SIZE <= sbi->blocksize -
4338 						SUM_FOOTER_SIZE)
4339 				continue;
4340 
4341 			f2fs_folio_put(folio, true);
4342 
4343 			folio = f2fs_get_meta_folio(sbi, start++);
4344 			if (IS_ERR(folio))
4345 				return PTR_ERR(folio);
4346 			kaddr = folio_address(folio);
4347 			offset = 0;
4348 		}
4349 	}
4350 	f2fs_folio_put(folio, true);
4351 	return 0;
4352 }
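
/*
 * Layout parsed above (a sketch of the compacted summary area, written
 * by write_compacted_summaries() below):
 *
 *	+------------------+  block 0, offset 0
 *	| NAT journal      |  sum_journal_size bytes
 *	+------------------+
 *	| SIT journal      |  sum_journal_size bytes
 *	+------------------+
 *	| summary entries  |  SUMMARY_SIZE each, for the HOT/WARM/COLD
 *	| ...              |  data cursegs in order, spilling into further
 *	+------------------+  blocks, SUM_FOOTER_SIZE bytes left per block
 */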
4353 
4354 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
4355 {
4356 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4357 	struct f2fs_summary_block *sum;
4358 	struct curseg_info *curseg;
4359 	struct folio *new;
4360 	unsigned short blk_off;
4361 	unsigned int segno = 0;
4362 	block_t blk_addr = 0;
4363 	int err = 0;
4364 
4365 	/* get segment number and block addr */
4366 	if (IS_DATASEG(type)) {
4367 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
4368 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
4369 							CURSEG_HOT_DATA]);
4370 		if (__exist_node_summaries(sbi))
4371 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
4372 		else
4373 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
4374 	} else {
4375 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
4376 							CURSEG_HOT_NODE]);
4377 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
4378 							CURSEG_HOT_NODE]);
4379 		if (__exist_node_summaries(sbi))
4380 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
4381 							type - CURSEG_HOT_NODE);
4382 		else
4383 			blk_addr = GET_SUM_BLOCK(sbi, segno);
4384 	}
4385 
4386 	new = f2fs_get_meta_folio(sbi, blk_addr);
4387 	if (IS_ERR(new))
4388 		return PTR_ERR(new);
4389 	sum = folio_address(new);
4390 
4391 	if (IS_NODESEG(type)) {
4392 		if (__exist_node_summaries(sbi)) {
4393 			struct f2fs_summary *ns = sum_entries(sum);
4394 			int i;
4395 
4396 			for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
4397 				ns->version = 0;
4398 				ns->ofs_in_node = 0;
4399 			}
4400 		} else {
4401 			err = f2fs_restore_node_summary(sbi, segno, sum);
4402 			if (err)
4403 				goto out;
4404 		}
4405 	}
4406 
4407 	/* set the incomplete segment as the current segment */
4408 	curseg = CURSEG_I(sbi, type);
4409 	mutex_lock(&curseg->curseg_mutex);
4410 
4411 	/* update journal info */
4412 	down_write(&curseg->journal_rwsem);
4413 	memcpy(curseg->journal, sum_journal(sbi, sum), sbi->sum_journal_size);
4414 	up_write(&curseg->journal_rwsem);
4415 
4416 	memcpy(sum_entries(curseg->sum_blk), sum_entries(sum),
4417 			sbi->sum_entry_size);
4418 	memcpy(sum_footer(sbi, curseg->sum_blk), sum_footer(sbi, sum),
4419 			SUM_FOOTER_SIZE);
4420 	curseg->next_segno = segno;
4421 	reset_curseg(sbi, type, 0);
4422 	curseg->alloc_type = ckpt->alloc_type[type];
4423 	curseg->next_blkoff = blk_off;
4424 	mutex_unlock(&curseg->curseg_mutex);
4425 out:
4426 	f2fs_folio_put(new, true);
4427 	return err;
4428 }
4429 
4430 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
4431 {
4432 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
4433 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
4434 	int type = CURSEG_HOT_DATA;
4435 	int err;
4436 
4437 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
4438 		int npages = f2fs_npages_for_summary_flush(sbi, true);
4439 
4440 		if (npages >= 2)
4441 			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
4442 							META_CP, true);
4443 
4444 		/* restore for compacted data summary */
4445 		err = read_compacted_summaries(sbi);
4446 		if (err)
4447 			return err;
4448 		type = CURSEG_HOT_NODE;
4449 	}
4450 
4451 	if (__exist_node_summaries(sbi))
4452 		f2fs_ra_meta_pages(sbi,
4453 				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4454 				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
4455 
4456 	for (; type <= CURSEG_COLD_NODE; type++) {
4457 		err = read_normal_summaries(sbi, type);
4458 		if (err)
4459 			return err;
4460 	}
4461 
4462 	/* sanity check for summary blocks */
4463 	if (nats_in_cursum(nat_j) > sbi->nat_journal_entries ||
4464 			sits_in_cursum(sit_j) > sbi->sit_journal_entries) {
4465 		f2fs_err(sbi, "invalid journal entries nats %u sits %u",
4466 			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
4467 		return -EINVAL;
4468 	}
4469 
4470 	return 0;
4471 }
4472 
4473 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4474 {
4475 	struct folio *folio;
4476 	unsigned char *kaddr;
4477 	struct f2fs_summary *summary;
4478 	struct curseg_info *seg_i;
4479 	int written_size = 0;
4480 	int i, j;
4481 
4482 	folio = f2fs_grab_meta_folio(sbi, blkaddr++);
4483 	kaddr = folio_address(folio);
4484 	memset(kaddr, 0, PAGE_SIZE);
4485 
4486 	/* Step 1: write nat cache */
4487 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4488 	memcpy(kaddr, seg_i->journal, sbi->sum_journal_size);
4489 	written_size += sbi->sum_journal_size;
4490 
4491 	/* Step 2: write sit cache */
4492 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4493 	memcpy(kaddr + written_size, seg_i->journal, sbi->sum_journal_size);
4494 	written_size += sbi->sum_journal_size;
4495 
4496 	/* Step 3: write summary entries */
4497 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4498 		seg_i = CURSEG_I(sbi, i);
4499 		for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
4500 			if (!folio) {
4501 				folio = f2fs_grab_meta_folio(sbi, blkaddr++);
4502 				kaddr = folio_address(folio);
4503 				memset(kaddr, 0, PAGE_SIZE);
4504 				written_size = 0;
4505 			}
4506 			summary = (struct f2fs_summary *)(kaddr + written_size);
4507 			*summary = sum_entries(seg_i->sum_blk)[j];
4508 			written_size += SUMMARY_SIZE;
4509 
4510 			if (written_size + SUMMARY_SIZE <= sbi->blocksize -
4511 							SUM_FOOTER_SIZE)
4512 				continue;
4513 
4514 			folio_mark_dirty(folio);
4515 			f2fs_folio_put(folio, true);
4516 			folio = NULL;
4517 		}
4518 	}
4519 	if (folio) {
4520 		folio_mark_dirty(folio);
4521 		f2fs_folio_put(folio, true);
4522 	}
4523 }
4524 
4525 static void write_normal_summaries(struct f2fs_sb_info *sbi,
4526 					block_t blkaddr, int type)
4527 {
4528 	int i, end;
4529 
4530 	if (IS_DATASEG(type))
4531 		end = type + NR_CURSEG_DATA_TYPE;
4532 	else
4533 		end = type + NR_CURSEG_NODE_TYPE;
4534 
4535 	for (i = type; i < end; i++)
4536 		write_current_sum_page(sbi, i, blkaddr + (i - type));
4537 }
4538 
4539 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4540 {
4541 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4542 		write_compacted_summaries(sbi, start_blk);
4543 	else
4544 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4545 }
4546 
4547 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4548 {
4549 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4550 }
4551 
4552 int f2fs_lookup_journal_in_cursum(struct f2fs_sb_info *sbi,
4553 			struct f2fs_journal *journal, int type,
4554 			unsigned int val, int alloc)
4555 {
4556 	int i;
4557 
4558 	if (type == NAT_JOURNAL) {
4559 		for (i = 0; i < nats_in_cursum(journal); i++) {
4560 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4561 				return i;
4562 		}
4563 		if (alloc && __has_cursum_space(sbi, journal, 1, NAT_JOURNAL))
4564 			return update_nats_in_cursum(journal, 1);
4565 	} else if (type == SIT_JOURNAL) {
4566 		for (i = 0; i < sits_in_cursum(journal); i++)
4567 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4568 				return i;
4569 		if (alloc && __has_cursum_space(sbi, journal, 1, SIT_JOURNAL))
4570 			return update_sits_in_cursum(journal, 1);
4571 	}
4572 	return -1;
4573 }
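
/*
 * Typical use (as in f2fs_flush_sit_entries() below): find or allocate
 * a SIT journal slot for a segment before logging its raw entry:
 *
 *	offset = f2fs_lookup_journal_in_cursum(sbi, journal, SIT_JOURNAL,
 *						segno, 1);
 *	segno_in_journal(journal, offset) = cpu_to_le32(segno);
 *
 * A return of -1 means the value was not found and, when alloc is set,
 * that no journal space remained.
 */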
4574 
4575 static struct folio *get_current_sit_folio(struct f2fs_sb_info *sbi,
4576 					unsigned int segno)
4577 {
4578 	return f2fs_get_meta_folio(sbi, current_sit_addr(sbi, segno));
4579 }
4580 
4581 static struct folio *get_next_sit_folio(struct f2fs_sb_info *sbi,
4582 					unsigned int start)
4583 {
4584 	struct sit_info *sit_i = SIT_I(sbi);
4585 	struct folio *folio;
4586 	pgoff_t src_off, dst_off;
4587 
4588 	src_off = current_sit_addr(sbi, start);
4589 	dst_off = next_sit_addr(sbi, src_off);
4590 
4591 	folio = f2fs_grab_meta_folio(sbi, dst_off);
4592 	seg_info_to_sit_folio(sbi, folio, start);
4593 
4594 	folio_mark_dirty(folio);
4595 	set_to_next_sit(sit_i, start);
4596 
4597 	return folio;
4598 }
4599 
4600 static struct sit_entry_set *grab_sit_entry_set(void)
4601 {
4602 	struct sit_entry_set *ses =
4603 			f2fs_kmem_cache_alloc(sit_entry_set_slab,
4604 						GFP_NOFS, true, NULL);
4605 
4606 	ses->entry_cnt = 0;
4607 	INIT_LIST_HEAD(&ses->set_list);
4608 	return ses;
4609 }
4610 
4611 static void release_sit_entry_set(struct sit_entry_set *ses)
4612 {
4613 	list_del(&ses->set_list);
4614 	kmem_cache_free(sit_entry_set_slab, ses);
4615 }
4616 
4617 static void adjust_sit_entry_set(struct sit_entry_set *ses,
4618 						struct list_head *head)
4619 {
4620 	struct sit_entry_set *next = ses;
4621 
4622 	if (list_is_last(&ses->set_list, head))
4623 		return;
4624 
4625 	list_for_each_entry_continue(next, head, set_list)
4626 		if (ses->entry_cnt <= next->entry_cnt) {
4627 			list_move_tail(&ses->set_list, &next->set_list);
4628 			return;
4629 		}
4630 
4631 	list_move_tail(&ses->set_list, head);
4632 }
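
/*
 * adjust_sit_entry_set() keeps the sit_entry_set list sorted by
 * ascending entry_cnt, so that when f2fs_flush_sit_entries() walks the
 * list it journals the small sets first and falls back to SIT pages
 * only once the journal space is exhausted.
 */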
4633 
4634 static void add_sit_entry(unsigned int segno, struct list_head *head)
4635 {
4636 	struct sit_entry_set *ses;
4637 	unsigned int start_segno = START_SEGNO(segno);
4638 
4639 	list_for_each_entry(ses, head, set_list) {
4640 		if (ses->start_segno == start_segno) {
4641 			ses->entry_cnt++;
4642 			adjust_sit_entry_set(ses, head);
4643 			return;
4644 		}
4645 	}
4646 
4647 	ses = grab_sit_entry_set();
4648 
4649 	ses->start_segno = start_segno;
4650 	ses->entry_cnt++;
4651 	list_add(&ses->set_list, head);
4652 }
4653 
4654 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4655 {
4656 	struct f2fs_sm_info *sm_info = SM_I(sbi);
4657 	struct list_head *set_list = &sm_info->sit_entry_set;
4658 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4659 	unsigned int segno;
4660 
4661 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4662 		add_sit_entry(segno, set_list);
4663 }
4664 
4665 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4666 {
4667 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4668 	struct f2fs_journal *journal = curseg->journal;
4669 	int i;
4670 
4671 	down_write(&curseg->journal_rwsem);
4672 	for (i = 0; i < sits_in_cursum(journal); i++) {
4673 		unsigned int segno;
4674 		bool dirtied;
4675 
4676 		segno = le32_to_cpu(segno_in_journal(journal, i));
4677 		dirtied = __mark_sit_entry_dirty(sbi, segno);
4678 
4679 		if (!dirtied)
4680 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4681 	}
4682 	update_sits_in_cursum(journal, -i);
4683 	up_write(&curseg->journal_rwsem);
4684 }
4685 
4686 /*
4687  * CP (checkpoint) calls this function, which flushes SIT entries,
4688  * including the sit journal, and moves prefree segments to free segments.
4689  */
4690 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4691 {
4692 	struct sit_info *sit_i = SIT_I(sbi);
4693 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4694 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4695 	struct f2fs_journal *journal = curseg->journal;
4696 	struct sit_entry_set *ses, *tmp;
4697 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
4698 	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4699 	struct seg_entry *se;
4700 
4701 	down_write(&sit_i->sentry_lock);
4702 
4703 	if (!sit_i->dirty_sentries)
4704 		goto out;
4705 
4706 	/*
4707 	 * temporarily add and account the sit entries marked in the dirty
4708 	 * bitmap in the sit entry sets
4709 	 */
4710 	add_sits_in_set(sbi);
4711 
4712 	/*
4713 	 * if there is not enough space in the journal to store the dirty
4714 	 * sit entries, remove all entries from the journal and add and
4715 	 * account them in the sit entry sets instead.
4716 	 */
4717 	if (!__has_cursum_space(sbi, journal,
4718 			sit_i->dirty_sentries, SIT_JOURNAL) || !to_journal)
4719 		remove_sits_in_journal(sbi);
4720 
4721 	/*
4722 	 * there are two steps to flush sit entries:
4723 	 * #1, flush sit entries to journal in current cold data summary block.
4724 	 * #2, flush sit entries to sit page.
4725 	 */
4726 	list_for_each_entry_safe(ses, tmp, head, set_list) {
4727 		struct folio *folio = NULL;
4728 		struct f2fs_sit_block *raw_sit = NULL;
4729 		unsigned int start_segno = ses->start_segno;
4730 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4731 						(unsigned long)MAIN_SEGS(sbi));
4732 		unsigned int segno = start_segno;
4733 
4734 		if (to_journal &&
4735 			!__has_cursum_space(sbi, journal, ses->entry_cnt,
4736 				SIT_JOURNAL))
4737 			to_journal = false;
4738 
4739 		if (to_journal) {
4740 			down_write(&curseg->journal_rwsem);
4741 		} else {
4742 			folio = get_next_sit_folio(sbi, start_segno);
4743 			raw_sit = folio_address(folio);
4744 		}
4745 
4746 		/* flush dirty sit entries in region of current sit set */
4747 		for_each_set_bit_from(segno, bitmap, end) {
4748 			int offset, sit_offset;
4749 
4750 			se = get_seg_entry(sbi, segno);
4751 #ifdef CONFIG_F2FS_CHECK_FS
4752 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4753 						SIT_VBLOCK_MAP_SIZE))
4754 				f2fs_bug_on(sbi, 1);
4755 #endif
4756 
4757 			/* add discard candidates */
4758 			if (!(cpc->reason & CP_DISCARD)) {
4759 				cpc->trim_start = segno;
4760 				add_discard_addrs(sbi, cpc, false);
4761 			}
4762 
4763 			if (to_journal) {
4764 				offset = f2fs_lookup_journal_in_cursum(sbi, journal,
4765 							SIT_JOURNAL, segno, 1);
4766 				f2fs_bug_on(sbi, offset < 0);
4767 				segno_in_journal(journal, offset) =
4768 							cpu_to_le32(segno);
4769 				seg_info_to_raw_sit(se,
4770 					&sit_in_journal(journal, offset));
4771 				check_block_count(sbi, segno,
4772 					&sit_in_journal(journal, offset));
4773 			} else {
4774 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4775 				seg_info_to_raw_sit(se,
4776 						&raw_sit->entries[sit_offset]);
4777 				check_block_count(sbi, segno,
4778 						&raw_sit->entries[sit_offset]);
4779 			}
4780 
4781 			/* update ckpt_valid_block */
4782 			if (__is_large_section(sbi)) {
4783 				set_ckpt_valid_blocks(sbi, segno);
4784 				sanity_check_valid_blocks(sbi, segno);
4785 			}
4786 
4787 			__clear_bit(segno, bitmap);
4788 			sit_i->dirty_sentries--;
4789 			ses->entry_cnt--;
4790 		}
4791 
4792 		if (to_journal)
4793 			up_write(&curseg->journal_rwsem);
4794 		else
4795 			f2fs_folio_put(folio, true);
4796 
4797 		f2fs_bug_on(sbi, ses->entry_cnt);
4798 		release_sit_entry_set(ses);
4799 	}
4800 
4801 	f2fs_bug_on(sbi, !list_empty(head));
4802 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
4803 out:
4804 	if (cpc->reason & CP_DISCARD) {
4805 		__u64 trim_start = cpc->trim_start;
4806 
4807 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4808 			add_discard_addrs(sbi, cpc, false);
4809 
4810 		cpc->trim_start = trim_start;
4811 	}
4812 	up_write(&sit_i->sentry_lock);
4813 
4814 	set_prefree_as_free_segments(sbi);
4815 }
4816 
4817 static int build_sit_info(struct f2fs_sb_info *sbi)
4818 {
4819 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4820 	struct sit_info *sit_i;
4821 	unsigned int sit_segs, start;
4822 	char *src_bitmap, *bitmap;
4823 	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4824 	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4825 
4826 	/* allocate memory for SIT information */
4827 	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4828 	if (!sit_i)
4829 		return -ENOMEM;
4830 
4831 	SM_I(sbi)->sit_info = sit_i;
4832 
4833 	sit_i->sentries =
4834 		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4835 					      MAIN_SEGS(sbi)),
4836 			      GFP_KERNEL);
4837 	if (!sit_i->sentries)
4838 		return -ENOMEM;
4839 
4840 	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4841 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4842 								GFP_KERNEL);
4843 	if (!sit_i->dirty_sentries_bitmap)
4844 		return -ENOMEM;
4845 
4846 #ifdef CONFIG_F2FS_CHECK_FS
4847 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4848 #else
4849 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4850 #endif
4851 	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4852 	if (!sit_i->bitmap)
4853 		return -ENOMEM;
4854 
4855 	bitmap = sit_i->bitmap;
4856 
4857 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4858 		sit_i->sentries[start].cur_valid_map = bitmap;
4859 		bitmap += SIT_VBLOCK_MAP_SIZE;
4860 
4861 		sit_i->sentries[start].ckpt_valid_map = bitmap;
4862 		bitmap += SIT_VBLOCK_MAP_SIZE;
4863 
4864 #ifdef CONFIG_F2FS_CHECK_FS
4865 		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4866 		bitmap += SIT_VBLOCK_MAP_SIZE;
4867 #endif
4868 
4869 		if (discard_map) {
4870 			sit_i->sentries[start].discard_map = bitmap;
4871 			bitmap += SIT_VBLOCK_MAP_SIZE;
4872 		}
4873 	}
4874 
4875 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4876 	if (!sit_i->tmp_map)
4877 		return -ENOMEM;
4878 
4879 	if (__is_large_section(sbi)) {
4880 		sit_i->sec_entries =
4881 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4882 						      MAIN_SECS(sbi)),
4883 				      GFP_KERNEL);
4884 		if (!sit_i->sec_entries)
4885 			return -ENOMEM;
4886 	}
4887 
4888 	/* get information related to SIT */
4889 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4890 
4891 	/* setup SIT bitmap from checkpoint pack */
4892 	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4893 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4894 
4895 	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4896 	if (!sit_i->sit_bitmap)
4897 		return -ENOMEM;
4898 
4899 #ifdef CONFIG_F2FS_CHECK_FS
4900 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4901 					sit_bitmap_size, GFP_KERNEL);
4902 	if (!sit_i->sit_bitmap_mir)
4903 		return -ENOMEM;
4904 
4905 	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4906 					main_bitmap_size, GFP_KERNEL);
4907 	if (!sit_i->invalid_segmap)
4908 		return -ENOMEM;
4909 #endif
4910 
4911 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4912 	sit_i->sit_blocks = SEGS_TO_BLKS(sbi, sit_segs);
4913 	sit_i->written_valid_blocks = 0;
4914 	sit_i->bitmap_size = sit_bitmap_size;
4915 	sit_i->dirty_sentries = 0;
4916 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4917 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4918 	sit_i->mounted_time = ktime_get_boottime_seconds();
4919 	init_rwsem(&sit_i->sentry_lock);
4920 	return 0;
4921 }
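
/*
 * Per-segment bitmap layout carved out of the single sit_i->bitmap
 * allocation above, SIT_VBLOCK_MAP_SIZE bytes per map, in this order:
 *
 *	cur_valid_map | ckpt_valid_map | [cur_valid_map_mir] | [discard_map]
 *
 * cur_valid_map_mir exists only under CONFIG_F2FS_CHECK_FS, and
 * discard_map only when block-unit discard is enabled.
 */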
4922 
4923 static int build_free_segmap(struct f2fs_sb_info *sbi)
4924 {
4925 	struct free_segmap_info *free_i;
4926 	unsigned int bitmap_size, sec_bitmap_size;
4927 
4928 	/* allocate memory for free segmap information */
4929 	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4930 	if (!free_i)
4931 		return -ENOMEM;
4932 
4933 	SM_I(sbi)->free_info = free_i;
4934 
4935 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4936 	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4937 	if (!free_i->free_segmap)
4938 		return -ENOMEM;
4939 
4940 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4941 	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4942 	if (!free_i->free_secmap)
4943 		return -ENOMEM;
4944 
4945 	/* set all segments as dirty temporarily */
4946 	memset(free_i->free_segmap, 0xff, bitmap_size);
4947 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4948 
4949 	/* init free segmap information */
4950 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4951 	free_i->free_segments = 0;
4952 	free_i->free_sections = 0;
4953 	spin_lock_init(&free_i->segmap_lock);
4954 	return 0;
4955 }
4956 
4957 static int build_curseg(struct f2fs_sb_info *sbi)
4958 {
4959 	struct curseg_info *array;
4960 	int i;
4961 
4962 	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4963 					sizeof(*array)), GFP_KERNEL);
4964 	if (!array)
4965 		return -ENOMEM;
4966 
4967 	SM_I(sbi)->curseg_array = array;
4968 
4969 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4970 		mutex_init(&array[i].curseg_mutex);
4971 		array[i].sum_blk = f2fs_kzalloc(sbi, sbi->sum_blocksize,
4972 				GFP_KERNEL);
4973 		if (!array[i].sum_blk)
4974 			return -ENOMEM;
4975 		init_rwsem(&array[i].journal_rwsem);
4976 		array[i].journal = f2fs_kzalloc(sbi,
4977 				sbi->sum_journal_size, GFP_KERNEL);
4978 		if (!array[i].journal)
4979 			return -ENOMEM;
4980 		array[i].seg_type = log_type_to_seg_type(i);
4981 		reset_curseg_fields(&array[i]);
4982 	}
4983 	return restore_curseg_summaries(sbi);
4984 }
4985 
4986 static int build_sit_entries(struct f2fs_sb_info *sbi)
4987 {
4988 	struct sit_info *sit_i = SIT_I(sbi);
4989 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4990 	struct f2fs_journal *journal = curseg->journal;
4991 	struct seg_entry *se;
4992 	struct f2fs_sit_entry sit;
4993 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4994 	unsigned int i, start, end;
4995 	unsigned int readed, start_blk = 0;
4996 	int err = 0;
4997 	block_t sit_valid_blocks[2] = {0, 0};
4998 
4999 	do {
5000 		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
5001 							META_SIT, true);
5002 
5003 		start = start_blk * sit_i->sents_per_block;
5004 		end = (start_blk + readed) * sit_i->sents_per_block;
5005 
5006 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
5007 			struct f2fs_sit_block *sit_blk;
5008 			struct folio *folio;
5009 
5010 			se = &sit_i->sentries[start];
5011 			folio = get_current_sit_folio(sbi, start);
5012 			if (IS_ERR(folio))
5013 				return PTR_ERR(folio);
5014 			sit_blk = folio_address(folio);
5015 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
5016 			f2fs_folio_put(folio, true);
5017 
5018 			err = check_block_count(sbi, start, &sit);
5019 			if (err)
5020 				return err;
5021 			seg_info_from_raw_sit(se, &sit);
5022 
5023 			if (se->type >= NR_PERSISTENT_LOG) {
5024 				f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
5025 							se->type, start);
5026 				f2fs_handle_error(sbi,
5027 						ERROR_INCONSISTENT_SUM_TYPE);
5028 				return -EFSCORRUPTED;
5029 			}
5030 
5031 			sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
5032 
5033 			if (!f2fs_block_unit_discard(sbi))
5034 				goto init_discard_map_done;
5035 
5036 			/* build the discard map only once */
5037 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
5038 				memset(se->discard_map, 0xff,
5039 						SIT_VBLOCK_MAP_SIZE);
5040 				goto init_discard_map_done;
5041 			}
5042 			memcpy(se->discard_map, se->cur_valid_map,
5043 						SIT_VBLOCK_MAP_SIZE);
5044 			sbi->discard_blks += BLKS_PER_SEG(sbi) -
5045 						se->valid_blocks;
5046 init_discard_map_done:
5047 			if (__is_large_section(sbi))
5048 				get_sec_entry(sbi, start)->valid_blocks +=
5049 							se->valid_blocks;
5050 		}
5051 		start_blk += readed;
5052 	} while (start_blk < sit_blk_cnt);
5053 
5054 	down_read(&curseg->journal_rwsem);
5055 	for (i = 0; i < sits_in_cursum(journal); i++) {
5056 		unsigned int old_valid_blocks;
5057 
5058 		start = le32_to_cpu(segno_in_journal(journal, i));
5059 		if (start >= MAIN_SEGS(sbi)) {
5060 			f2fs_err(sbi, "Wrong journal entry on segno %u",
5061 				 start);
5062 			err = -EFSCORRUPTED;
5063 			f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
5064 			break;
5065 		}
5066 
5067 		se = &sit_i->sentries[start];
5068 		sit = sit_in_journal(journal, i);
5069 
5070 		old_valid_blocks = se->valid_blocks;
5071 
5072 		sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
5073 
5074 		err = check_block_count(sbi, start, &sit);
5075 		if (err)
5076 			break;
5077 		seg_info_from_raw_sit(se, &sit);
5078 
5079 		if (se->type >= NR_PERSISTENT_LOG) {
5080 			f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
5081 							se->type, start);
5082 			err = -EFSCORRUPTED;
5083 			f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
5084 			break;
5085 		}
5086 
5087 		sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
5088 
5089 		if (f2fs_block_unit_discard(sbi)) {
5090 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
5091 				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
5092 			} else {
5093 				memcpy(se->discard_map, se->cur_valid_map,
5094 							SIT_VBLOCK_MAP_SIZE);
5095 				sbi->discard_blks += old_valid_blocks;
5096 				sbi->discard_blks -= se->valid_blocks;
5097 			}
5098 		}
5099 
5100 		if (__is_large_section(sbi)) {
5101 			get_sec_entry(sbi, start)->valid_blocks +=
5102 							se->valid_blocks;
5103 			get_sec_entry(sbi, start)->valid_blocks -=
5104 							old_valid_blocks;
5105 		}
5106 	}
5107 	up_read(&curseg->journal_rwsem);
5108 
5109 	/* update ckpt_valid_block */
5110 	if (__is_large_section(sbi)) {
5111 		unsigned int segno;
5112 
5113 		for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
5114 			set_ckpt_valid_blocks(sbi, segno);
5115 			sanity_check_valid_blocks(sbi, segno);
5116 		}
5117 	}
5118 
5119 	if (err)
5120 		return err;
5121 
5122 	if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
5123 		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
5124 			 sit_valid_blocks[NODE], valid_node_count(sbi));
5125 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
5126 		return -EFSCORRUPTED;
5127 	}
5128 
5129 	if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
5130 				valid_user_blocks(sbi)) {
5131 		f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
5132 			 sit_valid_blocks[DATA], sit_valid_blocks[NODE],
5133 			 valid_user_blocks(sbi));
5134 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
5135 		return -EFSCORRUPTED;
5136 	}
5137 
5138 	return 0;
5139 }
5140 
5141 static void init_free_segmap(struct f2fs_sb_info *sbi)
5142 {
5143 	unsigned int start;
5144 	int type;
5145 	struct seg_entry *sentry;
5146 
5147 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
5148 		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
5149 			continue;
5150 		sentry = get_seg_entry(sbi, start);
5151 		if (!sentry->valid_blocks)
5152 			__set_free(sbi, start);
5153 		else
5154 			SIT_I(sbi)->written_valid_blocks +=
5155 						sentry->valid_blocks;
5156 	}
5157 
5158 	/* mark the current segments as in use */
5159 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
5160 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
5161 
5162 		__set_test_and_inuse(sbi, curseg_t->segno);
5163 	}
5164 }
5165 
5166 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
5167 {
5168 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5169 	struct free_segmap_info *free_i = FREE_I(sbi);
5170 	unsigned int segno = 0, offset = 0, secno;
5171 	block_t valid_blocks, usable_blks_in_seg;
5172 
5173 	while (1) {
5174 		/* find dirty segment based on free segmap */
5175 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
5176 		if (segno >= MAIN_SEGS(sbi))
5177 			break;
5178 		offset = segno + 1;
5179 		valid_blocks = get_valid_blocks(sbi, segno, false);
5180 		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
5181 		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
5182 			continue;
5183 		if (valid_blocks > usable_blks_in_seg) {
5184 			f2fs_bug_on(sbi, 1);
5185 			continue;
5186 		}
5187 		mutex_lock(&dirty_i->seglist_lock);
5188 		__locate_dirty_segment(sbi, segno, DIRTY);
5189 		mutex_unlock(&dirty_i->seglist_lock);
5190 	}
5191 
5192 	if (!__is_large_section(sbi))
5193 		return;
5194 
5195 	mutex_lock(&dirty_i->seglist_lock);
5196 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
5197 		valid_blocks = get_valid_blocks(sbi, segno, true);
5198 		secno = GET_SEC_FROM_SEG(sbi, segno);
5199 
5200 		if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
5201 			continue;
5202 		if (is_cursec(sbi, secno))
5203 			continue;
5204 		set_bit(secno, dirty_i->dirty_secmap);
5205 	}
5206 	mutex_unlock(&dirty_i->seglist_lock);
5207 }
5208 
5209 static int init_victim_secmap(struct f2fs_sb_info *sbi)
5210 {
5211 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5212 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
5213 
5214 	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
5215 	if (!dirty_i->victim_secmap)
5216 		return -ENOMEM;
5217 
5218 	dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
5219 	if (!dirty_i->pinned_secmap)
5220 		return -ENOMEM;
5221 
5222 	dirty_i->pinned_secmap_cnt = 0;
5223 	dirty_i->enable_pin_section = true;
5224 	return 0;
5225 }
5226 
5227 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
5228 {
5229 	struct dirty_seglist_info *dirty_i;
5230 	unsigned int bitmap_size, i;
5231 
5232 	/* allocate memory for dirty segments list information */
5233 	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
5234 								GFP_KERNEL);
5235 	if (!dirty_i)
5236 		return -ENOMEM;
5237 
5238 	SM_I(sbi)->dirty_info = dirty_i;
5239 	mutex_init(&dirty_i->seglist_lock);
5240 
5241 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
5242 
5243 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
5244 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
5245 								GFP_KERNEL);
5246 		if (!dirty_i->dirty_segmap[i])
5247 			return -ENOMEM;
5248 	}
5249 
5250 	if (__is_large_section(sbi)) {
5251 		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
5252 		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
5253 						bitmap_size, GFP_KERNEL);
5254 		if (!dirty_i->dirty_secmap)
5255 			return -ENOMEM;
5256 	}
5257 
5258 	init_dirty_segmap(sbi);
5259 	return init_victim_secmap(sbi);
5260 }
5261 
5262 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
5263 {
5264 	int i;
5265 
5266 	/*
5267 	 * In an LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
5268 	 * in an LFS curseg, all blkaddrs after .next_blkoff should be unused.
5269 	 */
5270 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
5271 		struct curseg_info *curseg = CURSEG_I(sbi, i);
5272 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
5273 		unsigned int blkofs = curseg->next_blkoff;
5274 
5275 		if (f2fs_sb_has_readonly(sbi) &&
5276 			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
5277 			continue;
5278 
5279 		sanity_check_seg_type(sbi, curseg->seg_type);
5280 
5281 		if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
5282 			f2fs_err(sbi,
5283 				 "Current segment has invalid alloc_type:%d",
5284 				 curseg->alloc_type);
5285 			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
5286 			return -EFSCORRUPTED;
5287 		}
5288 
5289 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
5290 			goto out;
5291 
5292 		if (curseg->alloc_type == SSR)
5293 			continue;
5294 
5295 		for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
5296 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
5297 				continue;
5298 out:
5299 			f2fs_err(sbi,
5300 				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
5301 				 i, curseg->segno, curseg->alloc_type,
5302 				 curseg->next_blkoff, blkofs);
5303 			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
5304 			return -EFSCORRUPTED;
5305 		}
5306 	}
5307 	return 0;
5308 }
5309 
5310 #ifdef CONFIG_BLK_DEV_ZONED
5311 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
5312 				    struct f2fs_dev_info *fdev,
5313 				    struct blk_zone *zone)
5314 {
5315 	unsigned int zone_segno;
5316 	block_t zone_block, valid_block_cnt;
5317 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5318 	int ret;
5319 	unsigned int nofs_flags;
5320 
5321 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5322 		return 0;
5323 
5324 	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
5325 	zone_segno = GET_SEGNO(sbi, zone_block);
5326 
5327 	/*
5328 	 * Skip checking the zones that cursegs point to, since
5329 	 * fix_curseg_write_pointer() checks them.
5330 	 */
5331 	if (zone_segno >= MAIN_SEGS(sbi))
5332 		return 0;
5333 
5334 	/*
5335 	 * Get the number of valid blocks in the zone.
5336 	 */
5337 	valid_block_cnt = get_valid_blocks(sbi, zone_segno, true);
5338 	if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
5339 		f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]",
5340 				zone_segno, valid_block_cnt,
5341 				blk_zone_cond_str(zone->cond));
5342 		return 0;
5343 	}
5344 
5345 	if ((!valid_block_cnt && zone->cond == BLK_ZONE_COND_EMPTY) ||
5346 	    (valid_block_cnt && zone->cond == BLK_ZONE_COND_FULL))
5347 		return 0;
5348 
5349 	if (!valid_block_cnt) {
5350 		f2fs_notice(sbi, "Zone without valid block has non-zero write "
5351 			    "pointer. Reset the write pointer: cond[%s]",
5352 			    blk_zone_cond_str(zone->cond));
5353 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
5354 					zone->len >> log_sectors_per_block);
5355 		if (ret)
5356 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5357 				 fdev->path, ret);
5358 		return ret;
5359 	}
5360 
5361 	/*
5362 	 * If there are valid blocks and the write pointer doesn't match
5363 	 * them, we need to report the inconsistency and fill the zone to
5364 	 * the end to close it. This inconsistency does not cause a write
5365 	 * error because the zone will not be selected for a write
5366 	 * operation until it gets discarded.
5367 	 */
5368 	f2fs_notice(sbi, "Valid blocks are not aligned with write "
5369 		    "pointer: valid block[0x%x,0x%x] cond[%s]",
5370 		    zone_segno, valid_block_cnt, blk_zone_cond_str(zone->cond));
5371 
5372 	nofs_flags = memalloc_nofs_save();
5373 	ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
5374 				zone->start, zone->len);
5375 	memalloc_nofs_restore(nofs_flags);
5376 	if (ret == -EOPNOTSUPP) {
5377 		ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
5378 					zone->len - (zone->wp - zone->start),
5379 					GFP_NOFS, 0);
5380 		if (ret)
5381 			f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
5382 					fdev->path, ret);
5383 	} else if (ret) {
5384 		f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
5385 				fdev->path, ret);
5386 	}
5387 
5388 	return ret;
5389 }
5390 
5391 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
5392 						  block_t zone_blkaddr)
5393 {
5394 	int i;
5395 
5396 	for (i = 0; i < sbi->s_ndevs; i++) {
5397 		if (!bdev_is_zoned(FDEV(i).bdev))
5398 			continue;
5399 		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
5400 				zone_blkaddr <= FDEV(i).end_blk))
5401 			return &FDEV(i);
5402 	}
5403 
5404 	return NULL;
5405 }
5406 
5407 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
5408 			      void *data)
5409 {
5410 	memcpy(data, zone, sizeof(struct blk_zone));
5411 	return 0;
5412 }
5413 
5414 static int do_fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
5415 {
5416 	struct curseg_info *cs = CURSEG_I(sbi, type);
5417 	struct f2fs_dev_info *zbd;
5418 	struct blk_zone zone;
5419 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
5420 	block_t cs_zone_block, wp_block;
5421 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5422 	sector_t zone_sector;
5423 	int err;
5424 
5425 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5426 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5427 
5428 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
5429 	if (!zbd)
5430 		return 0;
5431 
5432 	/* report zone for the sector the curseg points to */
5433 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
5434 		<< log_sectors_per_block;
5435 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
5436 				  report_one_zone_cb, &zone);
5437 	if (err != 1) {
5438 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5439 			 zbd->path, err);
5440 		return err;
5441 	}
5442 
5443 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5444 		return 0;
5445 
5446 	/*
5447 	 * When safely unmounted in the previous mount, we can reuse the
5448 	 * current segments. Otherwise, allocate new sections.
5449 	 */
5450 	if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
5451 		wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
5452 		wp_segno = GET_SEGNO(sbi, wp_block);
5453 		wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
5454 		wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
5455 
5456 		if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
5457 				wp_sector_off == 0)
5458 			return 0;
5459 
5460 		f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
5461 			    "curseg[0x%x,0x%x] wp[0x%x,0x%x]", type, cs->segno,
5462 			    cs->next_blkoff, wp_segno, wp_blkoff);
5463 	}
5464 
5465 	/* Allocate a new section if the curseg was used or is not zone-aligned. */
5466 	if (cs->next_blkoff ||
5467 	    cs->segno != GET_SEG_FROM_SEC(sbi, GET_ZONE_FROM_SEC(sbi, cs_section))) {
5468 		unsigned int old_segno = cs->segno, old_blkoff = cs->next_blkoff;
5469 
5470 		f2fs_allocate_new_section(sbi, type, true);
5471 		f2fs_notice(sbi, "Assign new section to curseg[%d]: "
5472 				"[0x%x,0x%x] -> [0x%x,0x%x]",
5473 				type, old_segno, old_blkoff,
5474 				cs->segno, cs->next_blkoff);
5475 	}
5476 
5477 	/* check consistency of the zone the curseg points to */
5478 	if (check_zone_write_pointer(sbi, zbd, &zone))
5479 		return -EIO;
5480 
5481 	/* check newly assigned zone */
5482 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5483 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5484 
5485 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
5486 	if (!zbd)
5487 		return 0;

	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
		<< log_sectors_per_block;
	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
				  report_one_zone_cb, &zone);
	if (err != 1) {
		f2fs_err(sbi, "Report zone failed: %s (errno=%d)",
			 zbd->path, err);
		return err < 0 ? err : -EIO;
	}

	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

	if (zone.wp != zone.start) {
		f2fs_notice(sbi,
			    "New zone for curseg[%d] is not yet discarded. "
			    "Reset the zone: curseg[0x%x,0x%x]",
			    type, cs->segno, cs->next_blkoff);
		err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
					zone.len >> log_sectors_per_block);
		if (err) {
			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
				 zbd->path, err);
			return err;
		}
	}

	return 0;
}

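/*
 * Align every persistent log (the hot/warm/cold data and node cursegs)
 * with its zone write pointer.
 */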
static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;

	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
		ret = do_fix_curseg_write_pointer(sbi, i);
		if (ret)
			return ret;
	}

	return 0;
}

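/*
 * Context passed to check_zone_write_pointer_cb() through
 * blkdev_report_zones(), which only forwards a single void pointer.
 */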
struct check_zone_write_pointer_args {
	struct f2fs_sb_info *sbi;
	struct f2fs_dev_info *fdev;
};

static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
				       void *data)
{
	struct check_zone_write_pointer_args *args = data;

	return check_zone_write_pointer(args->sbi, args->fdev, zone);
}

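/*
 * Validate the write pointer of every zone on every zoned device via
 * check_zone_write_pointer().
 */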
static int check_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;
	struct check_zone_write_pointer_args args;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!bdev_is_zoned(FDEV(i).bdev))
			continue;

		args.sbi = sbi;
		args.fdev = &FDEV(i);
		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
					  check_zone_write_pointer_cb, &args);
		if (ret < 0)
			return ret;
	}

	return 0;
}

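/*
 * Fix the curseg write pointers first, then validate every zone's write
 * pointer. Nothing to do for non-zoned or read-only file systems.
 */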
int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
{
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb) ||
	    f2fs_hw_is_readonly(sbi))
		return 0;

	f2fs_notice(sbi, "Checking write pointers of all zones");
	ret = fix_curseg_write_pointer(sbi);
	if (!ret)
		ret = check_write_pointer(sbi);
	return ret;
}

/*
 * Return the number of usable blocks in a segment. The number of usable
 * blocks equals the number of blocks in the segment for segments fully
 * contained within a sequential zone's capacity or within a conventional
 * zone. For a segment that straddles the zone capacity, only the blocks
 * below the capacity are usable. For a segment that starts at or beyond
 * the zone capacity, zero is returned.
 */
static inline unsigned int f2fs_usable_zone_blks_in_seg(
			struct f2fs_sb_info *sbi, unsigned int segno)
{
	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
	unsigned int secno;

	if (!sbi->unusable_blocks_per_sec)
		return BLKS_PER_SEG(sbi);

	secno = GET_SEC_FROM_SEG(sbi, segno);
	seg_start = START_BLOCK(sbi, segno);
	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);

	/*
	 * If the segment starts before the zone capacity and spans beyond
	 * it, the usable blocks run from the segment start to the zone
	 * capacity. If the segment starts at or after the zone capacity,
	 * there are no usable blocks.
	 */
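	/*
	 * Illustrative example (numbers assumed, not from this file): with
	 * 512-block segments and CAP_BLKS_PER_SEC == 1536 in a 2048-block
	 * section, segments 0-2 are fully usable (512 blocks each), while
	 * segment 3 starts at the capacity and yields 0. With a capacity
	 * of 1792, segment 3 would instead yield 1792 - 1536 = 256 blocks.
	 */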
	if (seg_start >= sec_cap_blkaddr)
		return 0;
	if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
		return sec_cap_blkaddr - seg_start;

	return BLKS_PER_SEG(sbi);
}
#else
int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
{
	return 0;
}

static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
							unsigned int segno)
{
	return 0;
}

#endif

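/*
 * On zoned devices, usable blocks and segments are limited by the zone
 * capacity; otherwise the full segment/section geometry is usable.
 */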
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return f2fs_usable_zone_blks_in_seg(sbi, segno);

	return BLKS_PER_SEG(sbi);
}

unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return CAP_SEGS_PER_SEC(sbi);

	return SEGS_PER_SEC(sbi);
}

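/*
 * Return the modification time of a section: the single segment's mtime
 * for small sections, or the average of the member segments' mtimes
 * weighted by their valid block counts for large sections. INVALID_MTIME
 * is returned when a large section has no valid blocks.
 */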
unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
	unsigned int segno)
{
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
	unsigned int secno, start;
	unsigned int total_valid_blocks = 0;
	unsigned long long mtime = 0;
	unsigned int i;

	secno = GET_SEC_FROM_SEG(sbi, segno);
	start = GET_SEG_FROM_SEC(sbi, secno);

	if (!__is_large_section(sbi)) {
		mtime = get_seg_entry(sbi, start)->mtime;
		goto out;
	}

	for (i = 0; i < usable_segs_per_sec; i++) {
		/* for a large section, weight each segment's mtime by its valid blocks */
		struct seg_entry *se = get_seg_entry(sbi, start + i);

		mtime += se->mtime * se->valid_blocks;
		total_valid_blocks += se->valid_blocks;
	}

	if (total_valid_blocks == 0)
		return INVALID_MTIME;

	mtime = div_u64(mtime, total_valid_blocks);
out:
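	/*
	 * A computed mtime that happens to equal the INVALID_MTIME sentinel
	 * is nudged down so callers can still tell it apart from an error.
	 */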
	if (unlikely(mtime == INVALID_MTIME))
		mtime -= 1;
	return mtime;
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	down_write(&sit_i->sentry_lock);

	sit_i->min_mtime = ULLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
		unsigned long long mtime = f2fs_get_section_mtime(sbi, segno);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi, false);
	sit_i->dirty_max_mtime = 0;
	up_write(&sit_i->sentry_lock);
}

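/*
 * Build the segment manager: read the static geometry from the superblock
 * and checkpoint, then construct the SIT, the free segmap, the current
 * segments and the dirty segmap, in that order, before seeding the
 * min/max mtimes used by GC.
 */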
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

	if (!f2fs_lfs_mode(sbi))
		sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
	sm_info->min_ssr_sections = reserved_sections(sbi);

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	init_f2fs_rwsem(&sm_info->curseg_lock);

	err = f2fs_create_flush_cmd_control(sbi);
	if (err)
		return err;

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* rebuild SIT entries from disk and reinit the free segmap from them */
	err = build_sit_entries(sbi);
	if (err)
		return err;

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	err = sanity_check_curseg(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

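/*
 * The destroy_* helpers below tear down what f2fs_build_segment_manager()
 * built; each top-level helper returns early if its structure was never
 * allocated, so teardown is safe on a partially built manager.
 */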
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	kvfree(dirty_i->pinned_secmap);
	kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	if (__is_large_section(sbi)) {
		mutex_lock(&dirty_i->seglist_lock);
		kvfree(dirty_i->dirty_secmap);
		mutex_unlock(&dirty_i->seglist_lock);
	}

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;

	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!sit_i)
		return;

	if (sit_i->sentries)
		kvfree(sit_i->bitmap);
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kfree(sit_i->sit_bitmap_mir);
	kvfree(sit_i->invalid_segmap);
#endif
	kfree(sit_i);
}

void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	f2fs_destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

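/*
 * Create the slab caches shared by all f2fs instances; on allocation
 * failure, the caches created so far are torn down in reverse order.
 */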
int __init f2fs_create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
			sizeof(struct revoke_entry));
	if (!revoke_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(revoke_entry_slab);
}
