// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *revoke_entry_slab;

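/*
 * Byte-reverse @str into a native word so that the generic-style bit
 * scans below work on f2fs bitmaps, whose bit order is reversed within
 * each byte by f2fs_set_bit.
 */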
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be a multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

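/*
 * Decide whether block allocation should switch to SSR (Slack Space
 * Recycling): never in LFS mode, always under urgent GC or a disabled
 * checkpoint, otherwise when free sections run low.
 */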
bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (f2fs_lfs_mode(sbi))
		return false;
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}

void f2fs_abort_atomic_write(struct inode *inode, bool clean)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_is_atomic_file(inode))
		return;

	release_atomic_write_cnt(inode);
	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
	clear_inode_flag(inode, FI_ATOMIC_FILE);
	stat_dec_atomic_inode(inode);

	F2FS_I(inode)->atomic_write_task = NULL;

	if (clean) {
		truncate_inode_pages_final(inode->i_mapping);
		f2fs_i_size_write(inode, fi->original_i_size);
		fi->original_i_size = 0;
	}
	/* avoid stale dirty inode during eviction */
	sync_inode_metadata(inode, 0);
}

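/*
 * Swap the block address at @index with @new_addr.  On the commit path
 * (@recover == false) the previous address is saved through @old_addr so
 * the change can be revoked later; on the revoke path (@recover == true)
 * the saved address is written back.
 */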
static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
			block_t new_addr, block_t *old_addr, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct node_info ni;
	int err;

retry:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
			goto retry;
		}
		return err;
	}

	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	if (recover) {
		/* dn.data_blkaddr is always valid */
		if (!__is_valid_data_blkaddr(new_addr)) {
			if (new_addr == NULL_ADDR)
				dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
			f2fs_update_data_blkaddr(&dn, new_addr);
		} else {
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
				new_addr, ni.version, true, true);
		}
	} else {
		blkcnt_t count = 1;

		err = inc_valid_block_count(sbi, inode, &count);
		if (err) {
			f2fs_put_dnode(&dn);
			return err;
		}

		*old_addr = dn.data_blkaddr;
		f2fs_truncate_data_blocks_range(&dn, 1);
		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);

		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
					ni.version, true, false);
	}

	f2fs_put_dnode(&dn);

	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
			index, old_addr ? *old_addr : 0, new_addr, recover);
	return 0;
}

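/*
 * Free the revoke list built during commit: either roll every block back
 * to its saved address (@revoke), or, for a replacing atomic write,
 * truncate the ranges that were not overwritten.
 */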
static void __complete_revoke_list(struct inode *inode, struct list_head *head,
					bool revoke)
{
	struct revoke_entry *cur, *tmp;
	pgoff_t start_index = 0;
	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);

	list_for_each_entry_safe(cur, tmp, head, list) {
		if (revoke) {
			__replace_atomic_write_block(inode, cur->index,
						cur->old_addr, NULL, true);
		} else if (truncate) {
			f2fs_truncate_hole(inode, start_index, cur->index);
			start_index = cur->index + 1;
		}

		list_del(&cur->list);
		kmem_cache_free(revoke_entry_slab, cur);
	}

	if (!revoke && truncate)
		f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
}

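/*
 * Move every block written to the COW inode back into the original inode,
 * building a revoke list so that a failure part-way through can undo the
 * blocks already committed.
 */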
static int __f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inode *cow_inode = fi->cow_inode;
	struct revoke_entry *new;
	struct list_head revoke_list;
	block_t blkaddr;
	struct dnode_of_data dn;
	pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t off = 0, blen, index;
	int ret = 0, i;

	INIT_LIST_HEAD(&revoke_list);

	while (len) {
		blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);

		set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			if (dn.max_level == 0)
				goto out;
			goto next;
		}

		blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
				len);
		index = off;
		for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
			blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr)) {
				continue;
			} else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				ret = -EFSCORRUPTED;
				f2fs_handle_error(sbi,
						ERROR_INVALID_BLKADDR);
				goto out;
			}

			new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
							true, NULL);

			ret = __replace_atomic_write_block(inode, index, blkaddr,
							&new->old_addr, false);
			if (ret) {
				f2fs_put_dnode(&dn);
				kmem_cache_free(revoke_entry_slab, new);
				goto out;
			}

			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			new->index = index;
			list_add_tail(&new->list, &revoke_list);
		}
		f2fs_put_dnode(&dn);
next:
		off += blen;
		len -= blen;
	}

out:
	if (ret) {
		sbi->revoked_atomic_block += fi->atomic_write_cnt;
	} else {
		sbi->committed_atomic_block += fi->atomic_write_cnt;
		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
	}

	__complete_revoke_list(inode, &revoke_list, ret ? true : false);

	return ret;
}

int f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (err)
		return err;

	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
	f2fs_lock_op(sbi);

	err = __f2fs_commit_atomic_write(inode);

	f2fs_unlock_op(sbi);
	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (time_to_inject(sbi, FAULT_CHECKPOINT))
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);

	/* balance_fs_bg work may still be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi, false);

	if (!f2fs_is_checkpoint_ready(sbi))
		return;

	/*
	 * Do GC, which ends with a checkpoint, when there are too many
	 * dirty dir/node pages and not enough free segments.
	 */
	if (has_enough_free_secs(sbi, 0, 0))
		return;

	if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
				sbi->gc_thread->f2fs_gc_task) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
					TASK_UNINTERRUPTIBLE);
		wake_up(&sbi->gc_thread->gc_wait_queue_head);
		io_schedule();
		finish_wait(&sbi->gc_thread->fggc_wq, &wait);
	} else {
		struct f2fs_gc_control gc_control = {
			.victim_segno = NULL_SEGNO,
			.init_gc_type = BG_GC,
			.no_bg_gc = true,
			.should_migrate_blocks = false,
			.err_gc_skipped = false,
			.nr_free_secs = 1 };
		f2fs_down_write(&sbi->gc_lock);
		stat_inc_gc_call_count(sbi, FOREGROUND);
		f2fs_gc(sbi, &gc_control);
	}
}

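/*
 * Check whether dirty pages of any single type, or all types combined,
 * exceed their flush thresholds.
 */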
static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
{
	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int threshold = sbi->blocks_per_seg * factor *
					DEFAULT_DIRTY_THRESHOLD;
	unsigned int global_threshold = threshold * 3 / 2;

	if (dents >= threshold || qdata >= threshold ||
		nodes >= threshold || meta >= threshold ||
		imeta >= threshold)
		return true;
	return dents + qdata + nodes + meta + imeta > global_threshold;
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return;

	/* try to shrink extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
		f2fs_shrink_read_extent_tree(sbi,
				READ_EXTENT_CACHE_SHRINK_NUMBER);

	/* try to shrink age extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
		f2fs_shrink_age_extent_tree(sbi,
				AGE_EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		f2fs_build_free_nids(sbi, false, false);

	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
		goto do_sync;

	/* there is inflight background IO or a recent foreground operation */
	if (is_inflight_io(sbi, REQ_TIME) ||
		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
		return;

	/* the periodic checkpoint timeout has been exceeded */
	if (f2fs_time_over(sbi, CP_TIME))
		goto do_sync;

	/* checkpoint is the only way to shrink partial cached entries */
	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
		f2fs_available_free_memory(sbi, INO_ENTRIES))
		return;

do_sync:
	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
		struct blk_plug plug;

		mutex_lock(&sbi->flush_lock);

		blk_start_plug(&plug);
		f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
		blk_finish_plug(&plug);

		mutex_unlock(&sbi->flush_lock);
	}
	stat_inc_cp_call_count(sbi, BACKGROUND);
	f2fs_sync_fs(sbi->sb, 1);
}

static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	int ret = blkdev_issue_flush(bdev);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	if (!ret)
		f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

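/*
 * Flush-merge worker: drain the queued flush requests, issue one device
 * flush on their behalf, then propagate the result to every waiter.
 */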
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		atomic_inc(&fcc->queued_flush);
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
	    f2fs_is_multi_device(sbi)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/*
	 * Update issue_list before waking up the issue_flush thread; this
	 * smp_mb() pairs with another barrier in ___wait_event(), see the
	 * comments of waitqueue_active() for details.
	 */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->queued_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->queued_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->queued_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}

int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return 0;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->queued_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return 0;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		int err = PTR_ERR(fcc->f2fs_issue_flush);

		fcc->f2fs_issue_flush = NULL;
		return err;
	}

	return 0;
}

void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		int count = DEFAULT_RETRY_IO_COUNT;

		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;

		do {
			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
			if (ret)
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
		} while (ret && --count);

		if (ret) {
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FLUSH_FAIL);
			break;
		}

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;

		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
			block_t valid_blocks =
				get_valid_blocks(sbi, segno, true);

			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)));

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t valid_blocks;

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		valid_blocks = get_valid_blocks(sbi, segno, true);
		if (valid_blocks == 0) {
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
#ifdef CONFIG_F2FS_CHECK_FS
			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
#endif
		}
		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

			if (!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
				clear_bit(secno, dirty_i->dirty_secmap);
				return;
			}

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks, ckpt_valid_blocks;
	unsigned int usable_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);
	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
		ckpt_valid_blocks == usable_blocks)) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < usable_blocks) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/* Move currently empty dirty segments to prefree; takes seglist_lock internally */
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (IS_CURSEG(sbi, segno))
			continue;
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t holes[2] = {0, 0};	/* DATA and NODE */
	block_t unusable;
	struct seg_entry *se;
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		se = get_seg_entry(sbi, segno);
		if (IS_NODESEG(se->type))
			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
		else
			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	unusable = max(holes[DATA], holes[NODE]);
	if (unusable > ovp_holes)
		return unusable - ovp_holes;
	return 0;
}

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	if (unusable > F2FS_OPTION(sbi).unusable_cap)
		return -EAGAIN;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
		dirty_segments(sbi) > ovp_hole_segs)
		return -EAGAIN;
	return 0;
}

/* This is only used when SBI_CP_DISABLED is set */
static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = 0;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (get_ckpt_valid_blocks(sbi, segno, false))
			continue;
		mutex_unlock(&dirty_i->seglist_lock);
		return segno;
	}
	mutex_unlock(&dirty_i->seglist_lock);
	return NULL_SEGNO;
}

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->di.lstart = lstart;
	dc->di.start = start;
	dc->di.len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->queued = 0;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	spin_lock_init(&dc->lock);
	dc->bio_ref = 0;
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *cur = rb_first_cached(&dcc->root), *next;
	struct discard_cmd *cur_dc, *next_dc;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
		next_dc = rb_entry(next, struct discard_cmd, rb_node);

		if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
			f2fs_info(sbi, "broken discard_rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_dc->di.lstart, cur_dc->di.len,
				next_dc->di.lstart, next_dc->di.len);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *node = dcc->root.rb_root.rb_node;
	struct discard_cmd *dc;

	while (node) {
		dc = rb_entry(node, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			node = node->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			node = node->rb_right;
		else
			return dc;
	}
	return NULL;
}

static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
				block_t blkaddr,
				struct discard_cmd **prev_entry,
				struct discard_cmd **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct discard_cmd *dc;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	while (*pnode) {
		parent = *pnode;
		dc = rb_entry(*pnode, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			pnode = &(*pnode)->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	dc = rb_entry(parent, struct discard_cmd, rb_node);
	tmp_node = parent;
	if (parent && blkaddr > dc->di.lstart)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	tmp_node = parent;
	if (parent && blkaddr < dc->di.lstart)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return NULL;

lookup_neighbors:
	/* lookup prev node for merging backward later */
	tmp_node = rb_prev(&dc->rb_node);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	/* lookup next node for merging frontward later */
	tmp_node = rb_next(&dc->rb_node);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_sub(dc->queued, &dcc->queued_discard);

	list_del(&dc->list);
	rb_erase_cached(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->di.len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned long flags;

	trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);

	spin_lock_irqsave(&dc->lock, flags);
	if (dc->bio_ref) {
		spin_unlock_irqrestore(&dc->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dc->lock, flags);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		printk_ratelimited(
			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
			KERN_INFO, sbi->sb->s_id,
			dc->di.lstart, dc->di.start, dc->di.len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dc->lock, flags);
	if (!dc->error)
		dc->error = blk_status_to_errno(bio->bi_status);
	dc->bio_ref--;
	if (!dc->bio_ref && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);
	}
	spin_unlock_irqrestore(&dc->lock, flags);
	bio_put(bio);
}

static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
	unsigned long *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = max_blocks;
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

static void __init_discard_policy(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->ordered = false;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = dcc->max_discard_request;
	dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
	dpolicy->timeout = false;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = dcc->min_discard_issue_time;
		dpolicy->mid_interval = dcc->mid_discard_issue_time;
		dpolicy->max_interval = dcc->max_discard_issue_time;
		if (dcc->discard_io_aware == DPOLICY_IO_AWARE_ENABLE)
			dpolicy->io_aware = true;
		else if (dcc->discard_io_aware == DPOLICY_IO_AWARE_DISABLE)
			dpolicy->io_aware = false;
		dpolicy->sync = false;
		dpolicy->ordered = true;
		if (utilization(sbi) > dcc->discard_urgent_util) {
			dpolicy->granularity = MIN_DISCARD_GRANULARITY;
			if (atomic_read(&dcc->discard_cmd_cnt))
				dpolicy->max_interval =
					dcc->min_discard_issue_time;
		}
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = dcc->min_discard_issue_time;
		dpolicy->mid_interval = dcc->mid_discard_issue_time;
		dpolicy->max_interval = dcc->max_discard_issue_time;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->io_aware = false;
		/* we need to issue all to keep CP_TRIMMED_FLAG */
		dpolicy->granularity = MIN_DISCARD_GRANULARITY;
		dpolicy->timeout = true;
	}
}

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len);

#ifdef CONFIG_BLK_DEV_ZONED
static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
				   struct discard_cmd *dc, blk_opf_t flag,
				   struct list_head *wait_list,
				   unsigned int *issued)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct block_device *bdev = dc->bdev;
	struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
	unsigned long flags;

	trace_f2fs_issue_reset_zone(bdev, dc->di.start);

	spin_lock_irqsave(&dc->lock, flags);
	dc->state = D_SUBMIT;
	dc->bio_ref++;
	spin_unlock_irqrestore(&dc->lock, flags);

	if (issued)
		(*issued)++;

	atomic_inc(&dcc->queued_discard);
	dc->queued++;
	list_move_tail(&dc->list, wait_list);

	/* sanity check on discard range */
	__check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);

	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
	bio->bi_private = dc;
	bio->bi_end_io = f2fs_submit_discard_endio;
	submit_bio(bio);

	atomic_inc(&dcc->issued_discard);
	f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
}
#endif

/* this function is copied from blkdev_issue_discard in block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				struct discard_cmd *dc, int *issued)
{
	struct block_device *bdev = dc->bdev;
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
	block_t lstart, start, len, total_len;
	int err = 0;

	if (dc->state != D_PREP)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return 0;

#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
		int devi = f2fs_bdev_index(sbi, bdev);

		if (devi < 0)
			return -EINVAL;

		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
			__submit_zone_reset_cmd(sbi, dc, flag,
						wait_list, issued);
			return 0;
		}
	}
#endif

	trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);

	lstart = dc->di.lstart;
	start = dc->di.start;
	len = dc->di.len;
	total_len = len;

	dc->di.len = 0;

	while (total_len && *issued < dpolicy->max_requests && !err) {
		struct bio *bio = NULL;
		unsigned long flags;
		bool last = true;

		if (len > max_discard_blocks) {
			len = max_discard_blocks;
			last = false;
		}

		(*issued)++;
		if (*issued == dpolicy->max_requests)
			last = true;

		dc->di.len += len;

		if (time_to_inject(sbi, FAULT_DISCARD)) {
			err = -EIO;
		} else {
			err = __blkdev_issue_discard(bdev,
					SECTOR_FROM_BLOCK(start),
					SECTOR_FROM_BLOCK(len),
					GFP_NOFS, &bio);
		}
		if (err) {
			spin_lock_irqsave(&dc->lock, flags);
			if (dc->state == D_PARTIAL)
				dc->state = D_SUBMIT;
			spin_unlock_irqrestore(&dc->lock, flags);

			break;
		}

		f2fs_bug_on(sbi, !bio);

		/*
		 * must be set before submission so that the endio
		 * handler does not mark the command D_DONE right away
		 */
		spin_lock_irqsave(&dc->lock, flags);
		if (last)
			dc->state = D_SUBMIT;
		else
			dc->state = D_PARTIAL;
		dc->bio_ref++;
		spin_unlock_irqrestore(&dc->lock, flags);

		atomic_inc(&dcc->queued_discard);
		dc->queued++;
		list_move_tail(&dc->list, wait_list);

		/* sanity check on discard range */
		__check_sit_bitmap(sbi, lstart, lstart + len);

		bio->bi_private = dc;
		bio->bi_end_io = f2fs_submit_discard_endio;
		bio->bi_opf |= flag;
		submit_bio(bio);

		atomic_inc(&dcc->issued_discard);

		f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);

		lstart += len;
		start += len;
		total_len -= len;
		len = total_len;
	}

	if (!err && len) {
		dcc->undiscard_blks -= len;
		__update_discard_tree_range(sbi, bdev, lstart, start, len);
	}
	return err;
}

static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p = &dcc->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc;
	bool leftmost = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		dc = rb_entry(parent, struct discard_cmd, rb_node);

		if (lstart < dc->di.lstart) {
			p = &(*p)->rb_left;
		} else if (lstart >= dc->di.lstart + dc->di.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* skip adding if the range already exists */
			return;
		}
	}

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
}

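/*
 * Carve @blkaddr out of a pending discard command, trimming or splitting
 * the command so that the reallocated block is no longer discarded.
 */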
static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->di.len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->di.len = blkaddr - dc->di.lstart;
		dcc->undiscard_blks += dc->di.len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr);
		} else {
			dc->di.lstart++;
			dc->di.len--;
			dc->di.start++;
			dcc->undiscard_blks += dc->di.len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}

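/*
 * Add the range [lstart, lstart + len) to the discard rbtree, merging it
 * with neighbouring pending commands where the device limits allow.
 */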
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
	block_t end = lstart + len;

	dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
				&prev_dc, &next_dc, &insert_p, &insert_parent);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->di.lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->di.lstart + prev_dc->di.len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->di.lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->di.lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di,
							max_discard_blocks)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di,
							max_discard_blocks)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged)
			__insert_discard_cmd(sbi, bdev,
						di.lstart, di.start, di.len);
next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}
}

#ifdef CONFIG_BLK_DEV_ZONED
static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t lblkstart,
		block_t blklen)
{
	trace_f2fs_queue_reset_zone(bdev, blkstart);

	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
}
#endif

static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	if (!f2fs_bdev_support_discard(bdev))
		return;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (f2fs_is_multi_device(sbi)) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
}

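/* Issue pending discard commands in LBA order, resuming from dcc->next_pos. */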
static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
		struct discard_policy *dpolicy, int *issued)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	bool io_interrupted = false;

	mutex_lock(&dcc->cmd_lock);
	dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
				&prev_dc, &next_dc, &insert_p, &insert_parent);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc) {
		struct rb_node *node;
		int err = 0;

		if (dc->state != D_PREP)
			goto next;

		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
			io_interrupted = true;
			break;
		}

		dcc->next_pos = dc->di.lstart + dc->di.len;
		err = __submit_discard_cmd(sbi, dpolicy, dc, issued);

		if (*issued >= dpolicy->max_requests)
			break;
next:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	blk_finish_plug(&plug);

	if (!dc)
		dcc->next_pos = 0;

	mutex_unlock(&dcc->cmd_lock);

	if (!(*issued) && io_interrupted)
		*issued = -1;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy);

static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, issued;
	bool io_interrupted = false;

	if (dpolicy->timeout)
		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);

retry:
	issued = 0;
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
			break;

		if (i + 1 < dpolicy->granularity)
			break;

		if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) {
			__issue_discard_cmd_orderly(sbi, dpolicy, &issued);
			return issued;
		}

		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		if (unlikely(dcc->rbtree_check))
			f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
				break;

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
						!is_idle(sbi, DISCARD_TIME)) {
				io_interrupted = true;
				break;
			}

			__submit_discard_cmd(sbi, dpolicy, dc, &issued);

			if (issued >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (issued >= dpolicy->max_requests || io_interrupted)
			break;
	}

	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
		__wait_all_discard_cmd(sbi, dpolicy);
		goto retry;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;
	bool dropped = false;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
			dropped = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	return dropped;
}

void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	__drop_discard_cmd(sbi);
}

static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->di.len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}

static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc = NULL, *iter, *tmp;
	unsigned int trimmed = 0;

next:
	dc = NULL;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(iter, tmp, wait_list, list) {
		if (iter->di.lstart + iter->di.len <= start ||
					end <= iter->di.lstart)
			continue;
		if (iter->di.len < dpolicy->granularity)
			continue;
		if (iter->state == D_DONE && !iter->ref) {
			wait_for_completion_io(&iter->wait);
			if (!iter->error)
				trimmed += iter->di.len;
			__remove_discard_cmd(sbi, iter);
		} else {
			iter->ref++;
			dc = iter;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (dc) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	struct discard_policy dp;
	unsigned int discard_blks;

	if (dpolicy)
		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);

	/* wait all */
	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);

	return discard_blks;
}

/* The caller should hold the global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = __lookup_discard_cmd(sbi, blkaddr);
#ifdef CONFIG_BLK_DEV_ZONED
	if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
		int devi = f2fs_bdev_index(sbi, dc->bdev);

		if (devi < 0) {
			mutex_unlock(&dcc->cmd_lock);
			return;
		}

		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
			/* force submit zone reset */
			if (dc->state == D_PREP)
				__submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
							&dcc->wait_list, NULL);
			dc->ref++;
			mutex_unlock(&dcc->cmd_lock);
			/* wait zone reset */
			__wait_one_discard_bio(sbi, dc);
			return;
		}
	}
#endif
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/**
 * f2fs_issue_discard_timeout() - Issue all discard commands within UMOUNT_DISCARD_TIMEOUT
 * @sbi: the f2fs_sb_info data for discard commands to issue
 *
 * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands are dropped.
 *
 * Return: true if all discard commands were issued or none needed issuing,
 * otherwise false.
 */
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_policy dpolicy;
	bool dropped;

	if (!atomic_read(&dcc->discard_cmd_cnt))
		return true;

	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
					dcc->discard_granularity);
	__issue_discard_cmd(sbi, &dpolicy);
	dropped = __drop_discard_cmd(sbi);

	/* just to make sure there are no pending discard commands */
	__wait_all_discard_cmd(sbi, NULL);

	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
	return !dropped;
}

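/*
 * Background discard worker: choose a policy based on memory and GC
 * pressure, issue pending discard commands, and adapt its sleep interval
 * to the outcome.
 */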
1878 static int issue_discard_thread(void *data)
1879 {
1880 	struct f2fs_sb_info *sbi = data;
1881 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1882 	wait_queue_head_t *q = &dcc->discard_wait_queue;
1883 	struct discard_policy dpolicy;
1884 	unsigned int wait_ms = dcc->min_discard_issue_time;
1885 	int issued;
1886 
1887 	set_freezable();
1888 
1889 	do {
1890 		wait_event_freezable_timeout(*q,
1891 				kthread_should_stop() || dcc->discard_wake,
1892 				msecs_to_jiffies(wait_ms));
1893 
1894 		if (sbi->gc_mode == GC_URGENT_HIGH ||
1895 			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
1896 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
1897 						MIN_DISCARD_GRANULARITY);
1898 		else
1899 			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1900 						dcc->discard_granularity);
1901 
1902 		if (dcc->discard_wake)
1903 			dcc->discard_wake = false;
1904 
1905 		/* clean up pending candidates before going to sleep */
1906 		if (atomic_read(&dcc->queued_discard))
1907 			__wait_all_discard_cmd(sbi, NULL);
1908 
1909 		if (f2fs_readonly(sbi->sb))
1910 			continue;
1911 		if (kthread_should_stop())
1912 			return 0;
1913 		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1914 			!atomic_read(&dcc->discard_cmd_cnt)) {
1915 			wait_ms = dpolicy.max_interval;
1916 			continue;
1917 		}
1918 
1919 		sb_start_intwrite(sbi->sb);
1920 
1921 		issued = __issue_discard_cmd(sbi, &dpolicy);
1922 		if (issued > 0) {
1923 			__wait_all_discard_cmd(sbi, &dpolicy);
1924 			wait_ms = dpolicy.min_interval;
1925 		} else if (issued == -1) {
1926 			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1927 			if (!wait_ms)
1928 				wait_ms = dpolicy.mid_interval;
1929 		} else {
1930 			wait_ms = dpolicy.max_interval;
1931 		}
1932 		if (!atomic_read(&dcc->discard_cmd_cnt))
1933 			wait_ms = dpolicy.max_interval;
1934 
1935 		sb_end_intwrite(sbi->sb);
1936 
1937 	} while (!kthread_should_stop());
1938 	return 0;
1939 }
1940 
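/*
 * Rough sketch of the thread's adaptive sleep above, assuming the default
 * DEF_MIN/MID/MAX_DISCARD_ISSUE_TIME values from f2fs.h (50/500/60000 ms):
 * a productive pass re-arms a 50 ms sleep, a pass interrupted by competing
 * I/O waits out DISCARD_TIME or falls back to ~500 ms, and an empty queue
 * parks the thread for 60 s until wake_up_discard_thread() kicks it.
 */
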
1941 #ifdef CONFIG_BLK_DEV_ZONED
1942 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1943 		struct block_device *bdev, block_t blkstart, block_t blklen)
1944 {
1945 	sector_t sector, nr_sects;
1946 	block_t lblkstart = blkstart;
1947 	int devi = 0;
1948 	u64 remainder = 0;
1949 
1950 	if (f2fs_is_multi_device(sbi)) {
1951 		devi = f2fs_target_device_index(sbi, blkstart);
1952 		if (blkstart < FDEV(devi).start_blk ||
1953 		    blkstart > FDEV(devi).end_blk) {
1954 			f2fs_err(sbi, "Invalid block %x", blkstart);
1955 			return -EIO;
1956 		}
1957 		blkstart -= FDEV(devi).start_blk;
1958 	}
1959 
1960 	/* For sequential zones, reset the zone write pointer */
1961 	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1962 		sector = SECTOR_FROM_BLOCK(blkstart);
1963 		nr_sects = SECTOR_FROM_BLOCK(blklen);
1964 		div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);
1965 
1966 		if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
1967 			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1968 				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1969 				 blkstart, blklen);
1970 			return -EIO;
1971 		}
1972 
1973 		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
1974 			trace_f2fs_issue_reset_zone(bdev, blkstart);
1975 			return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1976 						sector, nr_sects, GFP_NOFS);
1977 		}
1978 
1979 		__queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
1980 		return 0;
1981 	}
1982 
1983 	/* For conventional zones, use regular discard if supported */
1984 	__queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1985 	return 0;
1986 }
1987 #endif
1988 
1989 static int __issue_discard_async(struct f2fs_sb_info *sbi,
1990 		struct block_device *bdev, block_t blkstart, block_t blklen)
1991 {
1992 #ifdef CONFIG_BLK_DEV_ZONED
1993 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
1994 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1995 #endif
1996 	__queue_discard_cmd(sbi, bdev, blkstart, blklen);
1997 	return 0;
1998 }
1999 
2000 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
2001 				block_t blkstart, block_t blklen)
2002 {
2003 	sector_t start = blkstart, len = 0;
2004 	struct block_device *bdev;
2005 	struct seg_entry *se;
2006 	unsigned int offset;
2007 	block_t i;
2008 	int err = 0;
2009 
2010 	bdev = f2fs_target_device(sbi, blkstart, NULL);
2011 
2012 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
2013 		if (i != start) {
2014 			struct block_device *bdev2 =
2015 				f2fs_target_device(sbi, i, NULL);
2016 
2017 			if (bdev2 != bdev) {
2018 				err = __issue_discard_async(sbi, bdev,
2019 						start, len);
2020 				if (err)
2021 					return err;
2022 				bdev = bdev2;
2023 				start = i;
2024 				len = 0;
2025 			}
2026 		}
2027 
2028 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
2029 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
2030 
2031 		if (f2fs_block_unit_discard(sbi) &&
2032 				!f2fs_test_and_set_bit(offset, se->discard_map))
2033 			sbi->discard_blks--;
2034 	}
2035 
2036 	if (len)
2037 		err = __issue_discard_async(sbi, bdev, start, len);
2038 	return err;
2039 }
2040 
2041 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
2042 							bool check_only)
2043 {
2044 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2045 	int max_blocks = sbi->blocks_per_seg;
2046 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
2047 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2048 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2049 	unsigned long *discard_map = (unsigned long *)se->discard_map;
2050 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
2051 	unsigned int start = 0, end = -1;
2052 	bool force = (cpc->reason & CP_DISCARD);
2053 	struct discard_entry *de = NULL;
2054 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
2055 	int i;
2056 
2057 	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi) ||
2058 			!f2fs_block_unit_discard(sbi))
2059 		return false;
2060 
2061 	if (!force) {
2062 		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
2063 			SM_I(sbi)->dcc_info->nr_discards >=
2064 				SM_I(sbi)->dcc_info->max_discards)
2065 			return false;
2066 	}
2067 
2068 	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
2069 	for (i = 0; i < entries; i++)
2070 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
2071 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
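	/*
	 * Worked example (one nibble, MSB-first per f2fs_set_bit): with
	 * ckpt_map = 1100 and cur_map = 0110, (cur ^ ckpt) & ckpt = 1000,
	 * so only blocks that were valid at the last checkpoint but have
	 * been freed since then become discard candidates.
	 */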
2072 
2073 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
2074 				SM_I(sbi)->dcc_info->max_discards) {
2075 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
2076 		if (start >= max_blocks)
2077 			break;
2078 
2079 		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
2080 		if (force && start && end != max_blocks
2081 					&& (end - start) < cpc->trim_minlen)
2082 			continue;
2083 
2084 		if (check_only)
2085 			return true;
2086 
2087 		if (!de) {
2088 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
2089 						GFP_F2FS_ZERO, true, NULL);
2090 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
2091 			list_add_tail(&de->list, head);
2092 		}
2093 
2094 		for (i = start; i < end; i++)
2095 			__set_bit_le(i, (void *)de->discard_map);
2096 
2097 		SM_I(sbi)->dcc_info->nr_discards += end - start;
2098 	}
2099 	return false;
2100 }
2101 
2102 static void release_discard_addr(struct discard_entry *entry)
2103 {
2104 	list_del(&entry->list);
2105 	kmem_cache_free(discard_entry_slab, entry);
2106 }
2107 
2108 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2109 {
2110 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2111 	struct discard_entry *entry, *this;
2112 
2113 	/* drop caches */
2114 	list_for_each_entry_safe(entry, this, head, list)
2115 		release_discard_addr(entry);
2116 }
2117 
2118 /*
2119  * Callers should call f2fs_clear_prefree_segments() after the checkpoint is done.
2120  */
2121 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2122 {
2123 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2124 	unsigned int segno;
2125 
2126 	mutex_lock(&dirty_i->seglist_lock);
2127 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2128 		__set_test_and_free(sbi, segno, false);
2129 	mutex_unlock(&dirty_i->seglist_lock);
2130 }
2131 
2132 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2133 						struct cp_control *cpc)
2134 {
2135 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2136 	struct list_head *head = &dcc->entry_list;
2137 	struct discard_entry *entry, *this;
2138 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2139 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2140 	unsigned int start = 0, end = -1;
2141 	unsigned int secno, start_segno;
2142 	bool force = (cpc->reason & CP_DISCARD);
2143 	bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2144 						DISCARD_UNIT_SECTION;
2145 
2146 	if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2147 		section_alignment = true;
2148 
2149 	mutex_lock(&dirty_i->seglist_lock);
2150 
2151 	while (1) {
2152 		int i;
2153 
2154 		if (section_alignment && end != -1)
2155 			end--;
2156 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2157 		if (start >= MAIN_SEGS(sbi))
2158 			break;
2159 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2160 								start + 1);
2161 
2162 		if (section_alignment) {
2163 			start = rounddown(start, sbi->segs_per_sec);
2164 			end = roundup(end, sbi->segs_per_sec);
2165 		}
2166 
2167 		for (i = start; i < end; i++) {
2168 			if (test_and_clear_bit(i, prefree_map))
2169 				dirty_i->nr_dirty[PRE]--;
2170 		}
2171 
2172 		if (!f2fs_realtime_discard_enable(sbi))
2173 			continue;
2174 
2175 		if (force && start >= cpc->trim_start &&
2176 					(end - 1) <= cpc->trim_end)
2177 			continue;
2178 
2179 		/* Zoned devices need section-aligned (e.g. 2MB) discards for zone-based reset */
2180 		if (!f2fs_sb_has_blkzoned(sbi) &&
2181 		    (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
2182 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2183 				(end - start) << sbi->log_blocks_per_seg);
2184 			continue;
2185 		}
2186 next:
2187 		secno = GET_SEC_FROM_SEG(sbi, start);
2188 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
2189 		if (!IS_CURSEC(sbi, secno) &&
2190 			!get_valid_blocks(sbi, start, true))
2191 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2192 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
2193 
2194 		start = start_segno + sbi->segs_per_sec;
2195 		if (start < end)
2196 			goto next;
2197 		else
2198 			end = start - 1;
2199 	}
2200 	mutex_unlock(&dirty_i->seglist_lock);
2201 
2202 	if (!f2fs_block_unit_discard(sbi))
2203 		goto wakeup;
2204 
2205 	/* send small discards */
2206 	list_for_each_entry_safe(entry, this, head, list) {
2207 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2208 		bool is_valid = test_bit_le(0, entry->discard_map);
2209 
2210 find_next:
2211 		if (is_valid) {
2212 			next_pos = find_next_zero_bit_le(entry->discard_map,
2213 					sbi->blocks_per_seg, cur_pos);
2214 			len = next_pos - cur_pos;
2215 
2216 			if (f2fs_sb_has_blkzoned(sbi) ||
2217 			    (force && len < cpc->trim_minlen))
2218 				goto skip;
2219 
2220 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2221 									len);
2222 			total_len += len;
2223 		} else {
2224 			next_pos = find_next_bit_le(entry->discard_map,
2225 					sbi->blocks_per_seg, cur_pos);
2226 		}
2227 skip:
2228 		cur_pos = next_pos;
2229 		is_valid = !is_valid;
2230 
2231 		if (cur_pos < sbi->blocks_per_seg)
2232 			goto find_next;
2233 
2234 		release_discard_addr(entry);
2235 		dcc->nr_discards -= total_len;
2236 	}
2237 
2238 wakeup:
2239 	wake_up_discard_thread(sbi, false);
2240 }
2241 
2242 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2243 {
2244 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2245 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2246 	int err = 0;
2247 
2248 	if (!f2fs_realtime_discard_enable(sbi))
2249 		return 0;
2250 
2251 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2252 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2253 	if (IS_ERR(dcc->f2fs_issue_discard)) {
2254 		err = PTR_ERR(dcc->f2fs_issue_discard);
2255 		dcc->f2fs_issue_discard = NULL;
2256 	}
2257 
2258 	return err;
2259 }
2260 
2261 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2262 {
2263 	struct discard_cmd_control *dcc;
2264 	int err = 0, i;
2265 
2266 	if (SM_I(sbi)->dcc_info) {
2267 		dcc = SM_I(sbi)->dcc_info;
2268 		goto init_thread;
2269 	}
2270 
2271 	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2272 	if (!dcc)
2273 		return -ENOMEM;
2274 
2275 	dcc->discard_io_aware_gran = MAX_PLIST_NUM;
2276 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2277 	dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
2278 	dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE;
2279 	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2280 		dcc->discard_granularity = sbi->blocks_per_seg;
2281 	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2282 		dcc->discard_granularity = BLKS_PER_SEC(sbi);
2283 
2284 	INIT_LIST_HEAD(&dcc->entry_list);
2285 	for (i = 0; i < MAX_PLIST_NUM; i++)
2286 		INIT_LIST_HEAD(&dcc->pend_list[i]);
2287 	INIT_LIST_HEAD(&dcc->wait_list);
2288 	INIT_LIST_HEAD(&dcc->fstrim_list);
2289 	mutex_init(&dcc->cmd_lock);
2290 	atomic_set(&dcc->issued_discard, 0);
2291 	atomic_set(&dcc->queued_discard, 0);
2292 	atomic_set(&dcc->discard_cmd_cnt, 0);
2293 	dcc->nr_discards = 0;
2294 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2295 	dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
2296 	dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
2297 	dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
2298 	dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
2299 	dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
2300 	dcc->undiscard_blks = 0;
2301 	dcc->next_pos = 0;
2302 	dcc->root = RB_ROOT_CACHED;
2303 	dcc->rbtree_check = false;
2304 
2305 	init_waitqueue_head(&dcc->discard_wait_queue);
2306 	SM_I(sbi)->dcc_info = dcc;
2307 init_thread:
2308 	err = f2fs_start_discard_thread(sbi);
2309 	if (err) {
2310 		kfree(dcc);
2311 		SM_I(sbi)->dcc_info = NULL;
2312 	}
2313 
2314 	return err;
2315 }
2316 
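/*
 * Sketch of the granularity defaults above, assuming 4KB blocks and 2MB
 * segments: discard_granularity starts at DEFAULT_DISCARD_GRANULARITY
 * (16 blocks, i.e. 64KB), becomes blocks_per_seg (512 blocks = 2MB) for
 * discard_unit=segment, and BLKS_PER_SEC() for discard_unit=section.
 */
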
2317 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2318 {
2319 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2320 
2321 	if (!dcc)
2322 		return;
2323 
2324 	f2fs_stop_discard_thread(sbi);
2325 
2326 	/*
2327 	 * Recovery can cache discard commands, so the error path of
2328 	 * fill_super() needs a chance to handle them.
2329 	 */
2330 	f2fs_issue_discard_timeout(sbi);
2331 
2332 	kfree(dcc);
2333 	SM_I(sbi)->dcc_info = NULL;
2334 }
2335 
2336 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2337 {
2338 	struct sit_info *sit_i = SIT_I(sbi);
2339 
2340 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2341 		sit_i->dirty_sentries++;
2342 		return false;
2343 	}
2344 
2345 	return true;
2346 }
2347 
2348 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2349 					unsigned int segno, int modified)
2350 {
2351 	struct seg_entry *se = get_seg_entry(sbi, segno);
2352 
2353 	se->type = type;
2354 	if (modified)
2355 		__mark_sit_entry_dirty(sbi, segno);
2356 }
2357 
2358 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2359 								block_t blkaddr)
2360 {
2361 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2362 
2363 	if (segno == NULL_SEGNO)
2364 		return 0;
2365 	return get_seg_entry(sbi, segno)->mtime;
2366 }
2367 
2368 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2369 						unsigned long long old_mtime)
2370 {
2371 	struct seg_entry *se;
2372 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2373 	unsigned long long ctime = get_mtime(sbi, false);
2374 	unsigned long long mtime = old_mtime ? old_mtime : ctime;
2375 
2376 	if (segno == NULL_SEGNO)
2377 		return;
2378 
2379 	se = get_seg_entry(sbi, segno);
2380 
2381 	if (!se->mtime)
2382 		se->mtime = mtime;
2383 	else
2384 		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2385 						se->valid_blocks + 1);
2386 
2387 	if (ctime > SIT_I(sbi)->max_mtime)
2388 		SIT_I(sbi)->max_mtime = ctime;
2389 }
2390 
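/*
 * Worked example for the running average above: a segment with
 * se->mtime = 100 across 3 valid blocks that absorbs a block with
 * mtime = 140 ends up with (100 * 3 + 140) / 4 = 110.
 */
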
2391 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2392 {
2393 	struct seg_entry *se;
2394 	unsigned int segno, offset;
2395 	long int new_vblocks;
2396 	bool exist;
2397 #ifdef CONFIG_F2FS_CHECK_FS
2398 	bool mir_exist;
2399 #endif
2400 
2401 	segno = GET_SEGNO(sbi, blkaddr);
2402 
2403 	se = get_seg_entry(sbi, segno);
2404 	new_vblocks = se->valid_blocks + del;
2405 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2406 
2407 	f2fs_bug_on(sbi, (new_vblocks < 0 ||
2408 			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2409 
2410 	se->valid_blocks = new_vblocks;
2411 
2412 	/* Update valid block bitmap */
2413 	if (del > 0) {
2414 		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2415 #ifdef CONFIG_F2FS_CHECK_FS
2416 		mir_exist = f2fs_test_and_set_bit(offset,
2417 						se->cur_valid_map_mir);
2418 		if (unlikely(exist != mir_exist)) {
2419 			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2420 				 blkaddr, exist);
2421 			f2fs_bug_on(sbi, 1);
2422 		}
2423 #endif
2424 		if (unlikely(exist)) {
2425 			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2426 				 blkaddr);
2427 			f2fs_bug_on(sbi, 1);
2428 			se->valid_blocks--;
2429 			del = 0;
2430 		}
2431 
2432 		if (f2fs_block_unit_discard(sbi) &&
2433 				!f2fs_test_and_set_bit(offset, se->discard_map))
2434 			sbi->discard_blks--;
2435 
2436 		/*
2437 		 * SSR should never reuse a block which is checkpointed
2438 		 * or newly invalidated.
2439 		 */
2440 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2441 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2442 				se->ckpt_valid_blocks++;
2443 		}
2444 	} else {
2445 		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2446 #ifdef CONFIG_F2FS_CHECK_FS
2447 		mir_exist = f2fs_test_and_clear_bit(offset,
2448 						se->cur_valid_map_mir);
2449 		if (unlikely(exist != mir_exist)) {
2450 			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2451 				 blkaddr, exist);
2452 			f2fs_bug_on(sbi, 1);
2453 		}
2454 #endif
2455 		if (unlikely(!exist)) {
2456 			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2457 				 blkaddr);
2458 			f2fs_bug_on(sbi, 1);
2459 			se->valid_blocks++;
2460 			del = 0;
2461 		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2462 			/*
2463 			 * If checkpoints are off, we must not reuse data that
2464 			 * was used in the previous checkpoint. If it was used
2465 			 * before, we must track that to know how much space we
2466 			 * really have.
2467 			 */
2468 			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2469 				spin_lock(&sbi->stat_lock);
2470 				sbi->unusable_block_count++;
2471 				spin_unlock(&sbi->stat_lock);
2472 			}
2473 		}
2474 
2475 		if (f2fs_block_unit_discard(sbi) &&
2476 			f2fs_test_and_clear_bit(offset, se->discard_map))
2477 			sbi->discard_blks++;
2478 	}
2479 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2480 		se->ckpt_valid_blocks += del;
2481 
2482 	__mark_sit_entry_dirty(sbi, segno);
2483 
2484 	/* update total number of valid blocks to be written in ckpt area */
2485 	SIT_I(sbi)->written_valid_blocks += del;
2486 
2487 	if (__is_large_section(sbi))
2488 		get_sec_entry(sbi, segno)->valid_blocks += del;
2489 }
2490 
2491 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2492 {
2493 	unsigned int segno = GET_SEGNO(sbi, addr);
2494 	struct sit_info *sit_i = SIT_I(sbi);
2495 
2496 	f2fs_bug_on(sbi, addr == NULL_ADDR);
2497 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2498 		return;
2499 
2500 	f2fs_invalidate_internal_cache(sbi, addr);
2501 
2502 	/* add it into the SIT main buffer */
2503 	down_write(&sit_i->sentry_lock);
2504 
2505 	update_segment_mtime(sbi, addr, 0);
2506 	update_sit_entry(sbi, addr, -1);
2507 
2508 	/* add it into dirty seglist */
2509 	locate_dirty_segment(sbi, segno);
2510 
2511 	up_write(&sit_i->sentry_lock);
2512 }
2513 
2514 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2515 {
2516 	struct sit_info *sit_i = SIT_I(sbi);
2517 	unsigned int segno, offset;
2518 	struct seg_entry *se;
2519 	bool is_cp = false;
2520 
2521 	if (!__is_valid_data_blkaddr(blkaddr))
2522 		return true;
2523 
2524 	down_read(&sit_i->sentry_lock);
2525 
2526 	segno = GET_SEGNO(sbi, blkaddr);
2527 	se = get_seg_entry(sbi, segno);
2528 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2529 
2530 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2531 		is_cp = true;
2532 
2533 	up_read(&sit_i->sentry_lock);
2534 
2535 	return is_cp;
2536 }
2537 
2538 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
2539 {
2540 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2541 
2542 	if (sbi->ckpt->alloc_type[type] == SSR)
2543 		return sbi->blocks_per_seg;
2544 	return curseg->next_blkoff;
2545 }
2546 
2547 /*
2548  * Calculate the number of current summary pages for writing
2549  */
2550 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2551 {
2552 	int valid_sum_count = 0;
2553 	int i, sum_in_page;
2554 
2555 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2556 		if (sbi->ckpt->alloc_type[i] != SSR && for_ra)
2557 			valid_sum_count +=
2558 				le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2559 		else
2560 			valid_sum_count += f2fs_curseg_valid_blocks(sbi, i);
2561 	}
2562 
2563 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
2564 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2565 	if (valid_sum_count <= sum_in_page)
2566 		return 1;
2567 	else if ((valid_sum_count - sum_in_page) <=
2568 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2569 		return 2;
2570 	return 3;
2571 }
2572 
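/*
 * Worked example, assuming 4KB blocks (so SUMMARY_SIZE = 7,
 * SUM_FOOTER_SIZE = 5 and SUM_JOURNAL_SIZE = 507): a page that also
 * carries two journals holds (4096 - 2 * 507 - 5) / 7 = 439 summaries,
 * and a journal-free page holds (4096 - 5) / 7 = 584 more, so up to
 * 439 + 584 = 1023 entries fit in two pages; anything beyond needs three.
 */
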
2573 /*
2574  * Caller should put this summary page
2575  */
2576 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2577 {
2578 	if (unlikely(f2fs_cp_error(sbi)))
2579 		return ERR_PTR(-EIO);
2580 	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2581 }
2582 
2583 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2584 					void *src, block_t blk_addr)
2585 {
2586 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2587 
2588 	memcpy(page_address(page), src, PAGE_SIZE);
2589 	set_page_dirty(page);
2590 	f2fs_put_page(page, 1);
2591 }
2592 
2593 static void write_sum_page(struct f2fs_sb_info *sbi,
2594 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2595 {
2596 	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2597 }
2598 
2599 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2600 						int type, block_t blk_addr)
2601 {
2602 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2603 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2604 	struct f2fs_summary_block *src = curseg->sum_blk;
2605 	struct f2fs_summary_block *dst;
2606 
2607 	dst = (struct f2fs_summary_block *)page_address(page);
2608 	memset(dst, 0, PAGE_SIZE);
2609 
2610 	mutex_lock(&curseg->curseg_mutex);
2611 
2612 	down_read(&curseg->journal_rwsem);
2613 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2614 	up_read(&curseg->journal_rwsem);
2615 
2616 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2617 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2618 
2619 	mutex_unlock(&curseg->curseg_mutex);
2620 
2621 	set_page_dirty(page);
2622 	f2fs_put_page(page, 1);
2623 }
2624 
2625 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2626 				struct curseg_info *curseg, int type)
2627 {
2628 	unsigned int segno = curseg->segno + 1;
2629 	struct free_segmap_info *free_i = FREE_I(sbi);
2630 
2631 	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2632 		return !test_bit(segno, free_i->free_segmap);
2633 	return 0;
2634 }
2635 
2636 /*
2637  * Find a new segment in the free segment bitmap, honoring the allocation
2638  * direction. This function must succeed; otherwise it is a BUG.
2639  */
2640 static void get_new_segment(struct f2fs_sb_info *sbi,
2641 			unsigned int *newseg, bool new_sec, int dir)
2642 {
2643 	struct free_segmap_info *free_i = FREE_I(sbi);
2644 	unsigned int segno, secno, zoneno;
2645 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2646 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2647 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2648 	unsigned int left_start = hint;
2649 	bool init = true;
2650 	int go_left = 0;
2651 	int i;
2652 
2653 	spin_lock(&free_i->segmap_lock);
2654 
2655 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2656 		segno = find_next_zero_bit(free_i->free_segmap,
2657 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2658 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2659 			goto got_it;
2660 	}
2661 find_other_zone:
2662 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2663 	if (secno >= MAIN_SECS(sbi)) {
2664 		if (dir == ALLOC_RIGHT) {
2665 			secno = find_first_zero_bit(free_i->free_secmap,
2666 							MAIN_SECS(sbi));
2667 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2668 		} else {
2669 			go_left = 1;
2670 			left_start = hint - 1;
2671 		}
2672 	}
2673 	if (go_left == 0)
2674 		goto skip_left;
2675 
2676 	while (test_bit(left_start, free_i->free_secmap)) {
2677 		if (left_start > 0) {
2678 			left_start--;
2679 			continue;
2680 		}
2681 		left_start = find_first_zero_bit(free_i->free_secmap,
2682 							MAIN_SECS(sbi));
2683 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2684 		break;
2685 	}
2686 	secno = left_start;
2687 skip_left:
2688 	segno = GET_SEG_FROM_SEC(sbi, secno);
2689 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2690 
2691 	/* give up on finding another zone */
2692 	if (!init)
2693 		goto got_it;
2694 	if (sbi->secs_per_zone == 1)
2695 		goto got_it;
2696 	if (zoneno == old_zoneno)
2697 		goto got_it;
2698 	if (dir == ALLOC_LEFT) {
2699 		if (!go_left && zoneno + 1 >= total_zones)
2700 			goto got_it;
2701 		if (go_left && zoneno == 0)
2702 			goto got_it;
2703 	}
2704 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2705 		if (CURSEG_I(sbi, i)->zone == zoneno)
2706 			break;
2707 
2708 	if (i < NR_CURSEG_TYPE) {
2709 		/* zone is in use, try another */
2710 		if (go_left)
2711 			hint = zoneno * sbi->secs_per_zone - 1;
2712 		else if (zoneno + 1 >= total_zones)
2713 			hint = 0;
2714 		else
2715 			hint = (zoneno + 1) * sbi->secs_per_zone;
2716 		init = false;
2717 		goto find_other_zone;
2718 	}
2719 got_it:
2720 	/* mark the segment as in-use in the free segmap */
2721 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2722 	__set_inuse(sbi, segno);
2723 	*newseg = segno;
2724 	spin_unlock(&free_i->segmap_lock);
2725 }
2726 
2727 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2728 {
2729 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2730 	struct summary_footer *sum_footer;
2731 	unsigned short seg_type = curseg->seg_type;
2732 
2733 	curseg->inited = true;
2734 	curseg->segno = curseg->next_segno;
2735 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2736 	curseg->next_blkoff = 0;
2737 	curseg->next_segno = NULL_SEGNO;
2738 
2739 	sum_footer = &(curseg->sum_blk->footer);
2740 	memset(sum_footer, 0, sizeof(struct summary_footer));
2741 
2742 	sanity_check_seg_type(sbi, seg_type);
2743 
2744 	if (IS_DATASEG(seg_type))
2745 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2746 	if (IS_NODESEG(seg_type))
2747 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2748 	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2749 }
2750 
2751 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2752 {
2753 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2754 	unsigned short seg_type = curseg->seg_type;
2755 
2756 	sanity_check_seg_type(sbi, seg_type);
2757 	if (f2fs_need_rand_seg(sbi))
2758 		return get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
2759 
2760 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
2761 	if (__is_large_section(sbi))
2762 		return curseg->segno;
2763 
2764 	/* an in-memory log may not be located on any segment after mount */
2765 	if (!curseg->inited)
2766 		return 0;
2767 
2768 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2769 		return 0;
2770 
2771 	if (test_opt(sbi, NOHEAP) &&
2772 		(seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
2773 		return 0;
2774 
2775 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2776 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2777 
2778 	/* find segments from 0 to reuse freed segments */
2779 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2780 		return 0;
2781 
2782 	return curseg->segno;
2783 }
2784 
2785 /*
2786  * Allocate a current working segment.
2787  * This function always allocates a free segment in an LFS manner.
2788  */
2789 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2790 {
2791 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2792 	unsigned short seg_type = curseg->seg_type;
2793 	unsigned int segno = curseg->segno;
2794 	int dir = ALLOC_LEFT;
2795 
2796 	if (curseg->inited)
2797 		write_sum_page(sbi, curseg->sum_blk,
2798 				GET_SUM_BLOCK(sbi, segno));
2799 	if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
2800 		dir = ALLOC_RIGHT;
2801 
2802 	if (test_opt(sbi, NOHEAP))
2803 		dir = ALLOC_RIGHT;
2804 
2805 	segno = __get_next_segno(sbi, type);
2806 	get_new_segment(sbi, &segno, new_sec, dir);
2807 	curseg->next_segno = segno;
2808 	reset_curseg(sbi, type, 1);
2809 	curseg->alloc_type = LFS;
2810 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2811 		curseg->fragment_remained_chunk =
2812 				get_random_u32_inclusive(1, sbi->max_fragment_chunk);
2813 }
2814 
2815 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2816 					int segno, block_t start)
2817 {
2818 	struct seg_entry *se = get_seg_entry(sbi, segno);
2819 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2820 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2821 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2822 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2823 	int i;
2824 
2825 	for (i = 0; i < entries; i++)
2826 		target_map[i] = ckpt_map[i] | cur_map[i];
2827 
2828 	return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2829 }
2830 
2831 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
2832 		struct curseg_info *seg)
2833 {
2834 	return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
2835 }
2836 
2837 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2838 {
2839 	return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
2840 }
2841 
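/*
 * Worked example for __next_free_blkoff() above (one nibble, MSB-first):
 * with cur_valid_map = 1010 and ckpt_valid_map = 0110, target_map = 1110,
 * so the first slot SSR may reuse is offset 3 -- free in both the current
 * and the checkpointed view.
 */
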
2842 /*
2843  * This function always allocates a used segment (from the dirty seglist) in an
2844  * SSR manner, so it must recover the existing summary information of valid blocks
2845  */
2846 static void change_curseg(struct f2fs_sb_info *sbi, int type)
2847 {
2848 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2849 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2850 	unsigned int new_segno = curseg->next_segno;
2851 	struct f2fs_summary_block *sum_node;
2852 	struct page *sum_page;
2853 
2854 	write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
2855 
2856 	__set_test_and_inuse(sbi, new_segno);
2857 
2858 	mutex_lock(&dirty_i->seglist_lock);
2859 	__remove_dirty_segment(sbi, new_segno, PRE);
2860 	__remove_dirty_segment(sbi, new_segno, DIRTY);
2861 	mutex_unlock(&dirty_i->seglist_lock);
2862 
2863 	reset_curseg(sbi, type, 1);
2864 	curseg->alloc_type = SSR;
2865 	curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2866 
2867 	sum_page = f2fs_get_sum_page(sbi, new_segno);
2868 	if (IS_ERR(sum_page)) {
2869 		/* GC won't be able to use stale summary pages due to cp_error */
2870 		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2871 		return;
2872 	}
2873 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2874 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2875 	f2fs_put_page(sum_page, 1);
2876 }
2877 
2878 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2879 				int alloc_mode, unsigned long long age);
2880 
2881 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2882 					int target_type, int alloc_mode,
2883 					unsigned long long age)
2884 {
2885 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2886 
2887 	curseg->seg_type = target_type;
2888 
2889 	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2890 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2891 
2892 		curseg->seg_type = se->type;
2893 		change_curseg(sbi, type);
2894 	} else {
2895 		/* allocate cold segment by default */
2896 		curseg->seg_type = CURSEG_COLD_DATA;
2897 		new_curseg(sbi, type, true);
2898 	}
2899 	stat_inc_seg_type(sbi, curseg);
2900 }
2901 
2902 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2903 {
2904 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2905 
2906 	if (!sbi->am.atgc_enabled)
2907 		return;
2908 
2909 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2910 
2911 	mutex_lock(&curseg->curseg_mutex);
2912 	down_write(&SIT_I(sbi)->sentry_lock);
2913 
2914 	get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2915 
2916 	up_write(&SIT_I(sbi)->sentry_lock);
2917 	mutex_unlock(&curseg->curseg_mutex);
2918 
2919 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
2920 }
2921 
2922 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2923 {
2924 	__f2fs_init_atgc_curseg(sbi);
2925 }
2926 
2927 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2928 {
2929 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2930 
2931 	mutex_lock(&curseg->curseg_mutex);
2932 	if (!curseg->inited)
2933 		goto out;
2934 
2935 	if (get_valid_blocks(sbi, curseg->segno, false)) {
2936 		write_sum_page(sbi, curseg->sum_blk,
2937 				GET_SUM_BLOCK(sbi, curseg->segno));
2938 	} else {
2939 		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2940 		__set_test_and_free(sbi, curseg->segno, true);
2941 		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2942 	}
2943 out:
2944 	mutex_unlock(&curseg->curseg_mutex);
2945 }
2946 
2947 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2948 {
2949 	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2950 
2951 	if (sbi->am.atgc_enabled)
2952 		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2953 }
2954 
2955 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2956 {
2957 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2958 
2959 	mutex_lock(&curseg->curseg_mutex);
2960 	if (!curseg->inited)
2961 		goto out;
2962 	if (get_valid_blocks(sbi, curseg->segno, false))
2963 		goto out;
2964 
2965 	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2966 	__set_test_and_inuse(sbi, curseg->segno);
2967 	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2968 out:
2969 	mutex_unlock(&curseg->curseg_mutex);
2970 }
2971 
2972 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2973 {
2974 	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2975 
2976 	if (sbi->am.atgc_enabled)
2977 		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2978 }
2979 
2980 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2981 				int alloc_mode, unsigned long long age)
2982 {
2983 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2984 	unsigned int segno = NULL_SEGNO;
2985 	unsigned short seg_type = curseg->seg_type;
2986 	int i, cnt;
2987 	bool reversed = false;
2988 
2989 	sanity_check_seg_type(sbi, seg_type);
2990 
2991 	/* f2fs_need_SSR() has already forced us to do this */
2992 	if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
2993 		curseg->next_segno = segno;
2994 		return 1;
2995 	}
2996 
2997 	/* For node segments, let's do SSR more intensively */
2998 	if (IS_NODESEG(seg_type)) {
2999 		if (seg_type >= CURSEG_WARM_NODE) {
3000 			reversed = true;
3001 			i = CURSEG_COLD_NODE;
3002 		} else {
3003 			i = CURSEG_HOT_NODE;
3004 		}
3005 		cnt = NR_CURSEG_NODE_TYPE;
3006 	} else {
3007 		if (seg_type >= CURSEG_WARM_DATA) {
3008 			reversed = true;
3009 			i = CURSEG_COLD_DATA;
3010 		} else {
3011 			i = CURSEG_HOT_DATA;
3012 		}
3013 		cnt = NR_CURSEG_DATA_TYPE;
3014 	}
3015 
3016 	for (; cnt-- > 0; reversed ? i-- : i++) {
3017 		if (i == seg_type)
3018 			continue;
3019 		if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
3020 			curseg->next_segno = segno;
3021 			return 1;
3022 		}
3023 	}
3024 
3025 	/* find valid_blocks=0 in dirty list */
3026 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
3027 		segno = get_free_segment(sbi);
3028 		if (segno != NULL_SEGNO) {
3029 			curseg->next_segno = segno;
3030 			return 1;
3031 		}
3032 	}
3033 	return 0;
3034 }
3035 
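/*
 * Worked sketch of the fallback scan above: for seg_type = CURSEG_WARM_NODE
 * the loop walks the node logs in reverse (COLD_NODE, then HOT_NODE,
 * skipping WARM_NODE itself), while CURSEG_HOT_DATA walks forward through
 * WARM_DATA and COLD_DATA.
 */
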
3036 static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
3037 {
3038 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3039 
3040 	if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
3041 	    curseg->seg_type == CURSEG_WARM_NODE)
3042 		return true;
3043 	if (curseg->alloc_type == LFS &&
3044 	    is_next_segment_free(sbi, curseg, type) &&
3045 	    likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3046 		return true;
3047 	if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
3048 		return true;
3049 	return false;
3050 }
3051 
3052 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3053 					unsigned int start, unsigned int end)
3054 {
3055 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3056 	unsigned int segno;
3057 
3058 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3059 	mutex_lock(&curseg->curseg_mutex);
3060 	down_write(&SIT_I(sbi)->sentry_lock);
3061 
3062 	segno = CURSEG_I(sbi, type)->segno;
3063 	if (segno < start || segno > end)
3064 		goto unlock;
3065 
3066 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
3067 		change_curseg(sbi, type);
3068 	else
3069 		new_curseg(sbi, type, true);
3070 
3071 	stat_inc_seg_type(sbi, curseg);
3072 
3073 	locate_dirty_segment(sbi, segno);
3074 unlock:
3075 	up_write(&SIT_I(sbi)->sentry_lock);
3076 
3077 	if (segno != curseg->segno)
3078 		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3079 			    type, segno, curseg->segno);
3080 
3081 	mutex_unlock(&curseg->curseg_mutex);
3082 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3083 }
3084 
3085 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3086 						bool new_sec, bool force)
3087 {
3088 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3089 	unsigned int old_segno;
3090 
3091 	if (!force && curseg->inited &&
3092 	    !curseg->next_blkoff &&
3093 	    !get_valid_blocks(sbi, curseg->segno, new_sec) &&
3094 	    !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3095 		return;
3096 
3097 	old_segno = curseg->segno;
3098 	new_curseg(sbi, type, true);
3099 	stat_inc_seg_type(sbi, curseg);
3100 	locate_dirty_segment(sbi, old_segno);
3101 }
3102 
3103 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3104 {
3105 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3106 	down_write(&SIT_I(sbi)->sentry_lock);
3107 	__allocate_new_segment(sbi, type, true, force);
3108 	up_write(&SIT_I(sbi)->sentry_lock);
3109 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3110 }
3111 
3112 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3113 {
3114 	int i;
3115 
3116 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3117 	down_write(&SIT_I(sbi)->sentry_lock);
3118 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
3119 		__allocate_new_segment(sbi, i, false, false);
3120 	up_write(&SIT_I(sbi)->sentry_lock);
3121 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3122 }
3123 
3124 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3125 						struct cp_control *cpc)
3126 {
3127 	__u64 trim_start = cpc->trim_start;
3128 	bool has_candidate = false;
3129 
3130 	down_write(&SIT_I(sbi)->sentry_lock);
3131 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
3132 		if (add_discard_addrs(sbi, cpc, true)) {
3133 			has_candidate = true;
3134 			break;
3135 		}
3136 	}
3137 	up_write(&SIT_I(sbi)->sentry_lock);
3138 
3139 	cpc->trim_start = trim_start;
3140 	return has_candidate;
3141 }
3142 
3143 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3144 					struct discard_policy *dpolicy,
3145 					unsigned int start, unsigned int end)
3146 {
3147 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3148 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
3149 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
3150 	struct discard_cmd *dc;
3151 	struct blk_plug plug;
3152 	int issued;
3153 	unsigned int trimmed = 0;
3154 
3155 next:
3156 	issued = 0;
3157 
3158 	mutex_lock(&dcc->cmd_lock);
3159 	if (unlikely(dcc->rbtree_check))
3160 		f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
3161 
3162 	dc = __lookup_discard_cmd_ret(&dcc->root, start,
3163 				&prev_dc, &next_dc, &insert_p, &insert_parent);
3164 	if (!dc)
3165 		dc = next_dc;
3166 
3167 	blk_start_plug(&plug);
3168 
3169 	while (dc && dc->di.lstart <= end) {
3170 		struct rb_node *node;
3171 		int err = 0;
3172 
3173 		if (dc->di.len < dpolicy->granularity)
3174 			goto skip;
3175 
3176 		if (dc->state != D_PREP) {
3177 			list_move_tail(&dc->list, &dcc->fstrim_list);
3178 			goto skip;
3179 		}
3180 
3181 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3182 
3183 		if (issued >= dpolicy->max_requests) {
3184 			start = dc->di.lstart + dc->di.len;
3185 
3186 			if (err)
3187 				__remove_discard_cmd(sbi, dc);
3188 
3189 			blk_finish_plug(&plug);
3190 			mutex_unlock(&dcc->cmd_lock);
3191 			trimmed += __wait_all_discard_cmd(sbi, NULL);
3192 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
3193 			goto next;
3194 		}
3195 skip:
3196 		node = rb_next(&dc->rb_node);
3197 		if (err)
3198 			__remove_discard_cmd(sbi, dc);
3199 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3200 
3201 		if (fatal_signal_pending(current))
3202 			break;
3203 	}
3204 
3205 	blk_finish_plug(&plug);
3206 	mutex_unlock(&dcc->cmd_lock);
3207 
3208 	return trimmed;
3209 }
3210 
3211 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3212 {
3213 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
3214 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3215 	unsigned int start_segno, end_segno;
3216 	block_t start_block, end_block;
3217 	struct cp_control cpc;
3218 	struct discard_policy dpolicy;
3219 	unsigned long long trimmed = 0;
3220 	int err = 0;
3221 	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3222 
3223 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3224 		return -EINVAL;
3225 
3226 	if (end < MAIN_BLKADDR(sbi))
3227 		goto out;
3228 
3229 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3230 		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3231 		return -EFSCORRUPTED;
3232 	}
3233 
3234 	/* start/end segment number in main_area */
3235 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3236 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3237 						GET_SEGNO(sbi, end);
3238 	if (need_align) {
3239 		start_segno = rounddown(start_segno, sbi->segs_per_sec);
3240 		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
3241 	}
3242 
3243 	cpc.reason = CP_DISCARD;
3244 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3245 	cpc.trim_start = start_segno;
3246 	cpc.trim_end = end_segno;
3247 
3248 	if (sbi->discard_blks == 0)
3249 		goto out;
3250 
3251 	f2fs_down_write(&sbi->gc_lock);
3252 	stat_inc_cp_call_count(sbi, TOTAL_CALL);
3253 	err = f2fs_write_checkpoint(sbi, &cpc);
3254 	f2fs_up_write(&sbi->gc_lock);
3255 	if (err)
3256 		goto out;
3257 
3258 	/*
3259 	 * We queued discard candidates, but we don't actually need to wait for
3260 	 * all of them, since they'll be issued during idle time by the runtime
3261 	 * discard option. The user configuration appears to rely on runtime
3262 	 * discard (or periodic fstrim) rather than waiting here.
3263 	 */
3264 	if (f2fs_realtime_discard_enable(sbi))
3265 		goto out;
3266 
3267 	start_block = START_BLOCK(sbi, start_segno);
3268 	end_block = START_BLOCK(sbi, end_segno + 1);
3269 
3270 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3271 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3272 					start_block, end_block);
3273 
3274 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3275 					start_block, end_block);
3276 out:
3277 	if (!err)
3278 		range->len = F2FS_BLK_TO_BYTES(trimmed);
3279 	return err;
3280 }
3281 
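/*
 * Illustrative userspace sketch, not part of f2fs: f2fs_trim_fs() is
 * reached through the FITRIM ioctl, roughly as below. Guarded out since
 * it needs userspace headers.
 */
#if 0
#include <fcntl.h>
#include <limits.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FITRIM, struct fstrim_range */

static int trim_whole_fs(const char *mntpoint)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ULLONG_MAX,
		.minlen = 0,	/* clamped to at least one block above */
	};
	int fd = open(mntpoint, O_RDONLY);
	int err;

	if (fd < 0)
		return -1;
	/* on success, range.len is rewritten with the number of bytes trimmed */
	err = ioctl(fd, FITRIM, &range);
	close(fd);
	return err;
}
#endif
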
3282 int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3283 {
3284 	switch (hint) {
3285 	case WRITE_LIFE_SHORT:
3286 		return CURSEG_HOT_DATA;
3287 	case WRITE_LIFE_EXTREME:
3288 		return CURSEG_COLD_DATA;
3289 	default:
3290 		return CURSEG_WARM_DATA;
3291 	}
3292 }
3293 
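/*
 * Illustrative userspace sketch, not part of f2fs: the write hint consumed
 * by f2fs_rw_hint_to_seg_type() is set per file via fcntl(F_SET_RW_HINT).
 * Guarded out since it needs userspace headers (and _GNU_SOURCE on glibc).
 */
#if 0
#include <stdint.h>
#include <fcntl.h>	/* F_SET_RW_HINT, RWH_WRITE_LIFE_* */

static int mark_file_short_lived(int fd)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;	/* maps to CURSEG_HOT_DATA above */

	return fcntl(fd, F_SET_RW_HINT, &hint);
}
#endif
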
3294 static int __get_segment_type_2(struct f2fs_io_info *fio)
3295 {
3296 	if (fio->type == DATA)
3297 		return CURSEG_HOT_DATA;
3298 	else
3299 		return CURSEG_HOT_NODE;
3300 }
3301 
3302 static int __get_segment_type_4(struct f2fs_io_info *fio)
3303 {
3304 	if (fio->type == DATA) {
3305 		struct inode *inode = fio->page->mapping->host;
3306 
3307 		if (S_ISDIR(inode->i_mode))
3308 			return CURSEG_HOT_DATA;
3309 		else
3310 			return CURSEG_COLD_DATA;
3311 	} else {
3312 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3313 			return CURSEG_WARM_NODE;
3314 		else
3315 			return CURSEG_COLD_NODE;
3316 	}
3317 }
3318 
3319 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
3320 {
3321 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3322 	struct extent_info ei = {};
3323 
3324 	if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
3325 		if (!ei.age)
3326 			return NO_CHECK_TYPE;
3327 		if (ei.age <= sbi->hot_data_age_threshold)
3328 			return CURSEG_HOT_DATA;
3329 		if (ei.age <= sbi->warm_data_age_threshold)
3330 			return CURSEG_WARM_DATA;
3331 		return CURSEG_COLD_DATA;
3332 	}
3333 	return NO_CHECK_TYPE;
3334 }
3335 
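/*
 * Worked example, assuming the defaults DEF_HOT_DATA_AGE_THRESHOLD = 262144
 * and DEF_WARM_DATA_AGE_THRESHOLD = 2621440: an extent aged 100000 blocks
 * is steered to CURSEG_HOT_DATA, one aged 1000000 to CURSEG_WARM_DATA, and
 * anything older to CURSEG_COLD_DATA.
 */
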
3336 static int __get_segment_type_6(struct f2fs_io_info *fio)
3337 {
3338 	if (fio->type == DATA) {
3339 		struct inode *inode = fio->page->mapping->host;
3340 		int type;
3341 
3342 		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3343 			return CURSEG_COLD_DATA_PINNED;
3344 
3345 		if (page_private_gcing(fio->page)) {
3346 			if (fio->sbi->am.atgc_enabled &&
3347 				(fio->io_type == FS_DATA_IO) &&
3348 				(fio->sbi->gc_mode != GC_URGENT_HIGH))
3349 				return CURSEG_ALL_DATA_ATGC;
3350 			else
3351 				return CURSEG_COLD_DATA;
3352 		}
3353 		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3354 			return CURSEG_COLD_DATA;
3355 
3356 		type = __get_age_segment_type(inode, fio->page->index);
3357 		if (type != NO_CHECK_TYPE)
3358 			return type;
3359 
3360 		if (file_is_hot(inode) ||
3361 				is_inode_flag_set(inode, FI_HOT_DATA) ||
3362 				f2fs_is_cow_file(inode))
3363 			return CURSEG_HOT_DATA;
3364 		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3365 	} else {
3366 		if (IS_DNODE(fio->page))
3367 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3368 						CURSEG_HOT_NODE;
3369 		return CURSEG_COLD_NODE;
3370 	}
3371 }
3372 
3373 static int __get_segment_type(struct f2fs_io_info *fio)
3374 {
3375 	int type = 0;
3376 
3377 	switch (F2FS_OPTION(fio->sbi).active_logs) {
3378 	case 2:
3379 		type = __get_segment_type_2(fio);
3380 		break;
3381 	case 4:
3382 		type = __get_segment_type_4(fio);
3383 		break;
3384 	case 6:
3385 		type = __get_segment_type_6(fio);
3386 		break;
3387 	default:
3388 		f2fs_bug_on(fio->sbi, true);
3389 	}
3390 
3391 	if (IS_HOT(type))
3392 		fio->temp = HOT;
3393 	else if (IS_WARM(type))
3394 		fio->temp = WARM;
3395 	else
3396 		fio->temp = COLD;
3397 	return type;
3398 }
3399 
3400 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
3401 		struct curseg_info *seg)
3402 {
3403 	/* To allocate block chunks of different sizes, use random numbers */
3404 	if (--seg->fragment_remained_chunk > 0)
3405 		return;
3406 
3407 	seg->fragment_remained_chunk =
3408 		get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3409 	seg->next_blkoff +=
3410 		get_random_u32_inclusive(1, sbi->max_fragment_hole);
3411 }
3412 
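/*
 * Sketch of FS_MODE_FRAGMENT_BLK above, assuming hypothetical tunable
 * values max_fragment_chunk = 4 and max_fragment_hole = 4: the allocator
 * lays down a random-sized chunk of up to 4 consecutive blocks, then skips
 * a random hole of 1..4 blocks, and repeats -- deliberately fragmenting
 * the layout for testing.
 */
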
3413 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3414 		block_t old_blkaddr, block_t *new_blkaddr,
3415 		struct f2fs_summary *sum, int type,
3416 		struct f2fs_io_info *fio)
3417 {
3418 	struct sit_info *sit_i = SIT_I(sbi);
3419 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3420 	unsigned long long old_mtime;
3421 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3422 	struct seg_entry *se = NULL;
3423 	bool segment_full = false;
3424 
3425 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3426 
3427 	mutex_lock(&curseg->curseg_mutex);
3428 	down_write(&sit_i->sentry_lock);
3429 
3430 	if (from_gc) {
3431 		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3432 		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3433 		sanity_check_seg_type(sbi, se->type);
3434 		f2fs_bug_on(sbi, IS_NODESEG(se->type));
3435 	}
3436 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3437 
3438 	f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
3439 
3440 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3441 
3442 	curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
3443 	if (curseg->alloc_type == SSR) {
3444 		curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
3445 	} else {
3446 		curseg->next_blkoff++;
3447 		if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3448 			f2fs_randomize_chunk(sbi, curseg);
3449 	}
3450 	if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
3451 		segment_full = true;
3452 	stat_inc_block_count(sbi, curseg);
3453 
3454 	if (from_gc) {
3455 		old_mtime = get_segment_mtime(sbi, old_blkaddr);
3456 	} else {
3457 		update_segment_mtime(sbi, old_blkaddr, 0);
3458 		old_mtime = 0;
3459 	}
3460 	update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3461 
3462 	/*
3463 	 * SIT information should be updated before segment allocation,
3464 	 * since SSR needs the latest valid block information.
3465 	 */
3466 	update_sit_entry(sbi, *new_blkaddr, 1);
3467 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3468 		update_sit_entry(sbi, old_blkaddr, -1);
3469 
3470 	/*
3471 	 * If the current segment is full, flush it out and replace it with a
3472 	 * new segment.
3473 	 */
3474 	if (segment_full) {
3475 		if (from_gc) {
3476 			get_atssr_segment(sbi, type, se->type,
3477 						AT_SSR, se->mtime);
3478 		} else {
3479 			if (need_new_seg(sbi, type))
3480 				new_curseg(sbi, type, false);
3481 			else
3482 				change_curseg(sbi, type);
3483 			stat_inc_seg_type(sbi, curseg);
3484 		}
3485 	}
3486 	/*
3487 	 * Segment dirty status should be updated after segment allocation,
3488 	 * so we only need to update the status once, after the previous
3489 	 * segment has been closed.
3490 	 */
3491 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3492 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3493 
3494 	if (IS_DATASEG(type))
3495 		atomic64_inc(&sbi->allocated_data_blocks);
3496 
3497 	up_write(&sit_i->sentry_lock);
3498 
3499 	if (page && IS_NODESEG(type)) {
3500 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3501 
3502 		f2fs_inode_chksum_set(sbi, page);
3503 	}
3504 
3505 	if (fio) {
3506 		struct f2fs_bio_info *io;
3507 
3508 		if (F2FS_IO_ALIGNED(sbi))
3509 			fio->retry = 0;
3510 
3511 		INIT_LIST_HEAD(&fio->list);
3512 		fio->in_list = 1;
3513 		io = sbi->write_io[fio->type] + fio->temp;
3514 		spin_lock(&io->io_lock);
3515 		list_add_tail(&fio->list, &io->io_list);
3516 		spin_unlock(&io->io_lock);
3517 	}
3518 
3519 	mutex_unlock(&curseg->curseg_mutex);
3520 
3521 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3522 }
3523 
3524 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3525 					block_t blkaddr, unsigned int blkcnt)
3526 {
3527 	if (!f2fs_is_multi_device(sbi))
3528 		return;
3529 
3530 	while (1) {
3531 		unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3532 		unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3533 
3534 		/* update device state for fsync */
3535 		f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3536 
3537 		/* update device state for checkpoint */
3538 		if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3539 			spin_lock(&sbi->dev_lock);
3540 			f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3541 			spin_unlock(&sbi->dev_lock);
3542 		}
3543 
3544 		if (blkcnt <= blks)
3545 			break;
3546 		blkcnt -= blks;
3547 		blkaddr += blks;
3548 	}
3549 }
3550 
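/*
 * Worked example for the loop above, assuming two devices mapping blocks
 * 0..1023 and 1024..2047: a write at blkaddr = 1000 with blkcnt = 100
 * first dirties device 0 (blks = 1023 - 1000 + 1 = 24), then advances to
 * blkaddr = 1024 and dirties device 1 for the remaining 76 blocks.
 */
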
3551 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3552 {
3553 	int type = __get_segment_type(fio);
3554 	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3555 
3556 	if (keep_order)
3557 		f2fs_down_read(&fio->sbi->io_order_lock);
3558 reallocate:
3559 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3560 			&fio->new_blkaddr, sum, type, fio);
3561 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3562 		f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
3563 
3564 	/* write out the dirty page to the bdev */
3565 	f2fs_submit_page_write(fio);
3566 	if (fio->retry) {
3567 		fio->old_blkaddr = fio->new_blkaddr;
3568 		goto reallocate;
3569 	}
3570 
3571 	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3572 
3573 	if (keep_order)
3574 		f2fs_up_read(&fio->sbi->io_order_lock);
3575 }
3576 
3577 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3578 					enum iostat_type io_type)
3579 {
3580 	struct f2fs_io_info fio = {
3581 		.sbi = sbi,
3582 		.type = META,
3583 		.temp = HOT,
3584 		.op = REQ_OP_WRITE,
3585 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3586 		.old_blkaddr = page->index,
3587 		.new_blkaddr = page->index,
3588 		.page = page,
3589 		.encrypted_page = NULL,
3590 		.in_list = 0,
3591 	};
3592 
3593 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3594 		fio.op_flags &= ~REQ_META;
3595 
3596 	set_page_writeback(page);
3597 	f2fs_submit_page_write(&fio);
3598 
3599 	stat_inc_meta_count(sbi, page->index);
3600 	f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
3601 }
3602 
3603 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3604 {
3605 	struct f2fs_summary sum;
3606 
3607 	set_summary(&sum, nid, 0, 0);
3608 	do_write_page(&sum, fio);
3609 
3610 	f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
3611 }
3612 
3613 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3614 					struct f2fs_io_info *fio)
3615 {
3616 	struct f2fs_sb_info *sbi = fio->sbi;
3617 	struct f2fs_summary sum;
3618 
3619 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3620 	if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO)
3621 		f2fs_update_age_extent_cache(dn);
3622 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3623 	do_write_page(&sum, fio);
3624 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3625 
3626 	f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
3627 }
3628 
3629 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3630 {
3631 	int err;
3632 	struct f2fs_sb_info *sbi = fio->sbi;
3633 	unsigned int segno;
3634 
3635 	fio->new_blkaddr = fio->old_blkaddr;
3636 	/* i/o temperature is needed for passing down write hints */
3637 	__get_segment_type(fio);
3638 
3639 	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3640 
3641 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3642 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3643 		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3644 			  __func__, segno);
3645 		err = -EFSCORRUPTED;
3646 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
3647 		goto drop_bio;
3648 	}
3649 
3650 	if (f2fs_cp_error(sbi)) {
3651 		err = -EIO;
3652 		goto drop_bio;
3653 	}
3654 
3655 	if (fio->post_read)
3656 		invalidate_mapping_pages(META_MAPPING(sbi),
3657 				fio->new_blkaddr, fio->new_blkaddr);
3658 
3659 	stat_inc_inplace_blocks(fio->sbi);
3660 
3661 	if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
3662 		err = f2fs_merge_page_bio(fio);
3663 	else
3664 		err = f2fs_submit_page_bio(fio);
3665 	if (!err) {
3666 		f2fs_update_device_state(fio->sbi, fio->ino,
3667 						fio->new_blkaddr, 1);
3668 		f2fs_update_iostat(fio->sbi, fio->page->mapping->host,
3669 						fio->io_type, F2FS_BLKSIZE);
3670 	}
3671 
3672 	return err;
3673 drop_bio:
3674 	if (fio->bio && *(fio->bio)) {
3675 		struct bio *bio = *(fio->bio);
3676 
3677 		bio->bi_status = BLK_STS_IOERR;
3678 		bio_endio(bio);
3679 		*(fio->bio) = NULL;
3680 	}
3681 	return err;
3682 }
3683 
3684 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3685 						unsigned int segno)
3686 {
3687 	int i;
3688 
3689 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3690 		if (CURSEG_I(sbi, i)->segno == segno)
3691 			break;
3692 	}
3693 	return i;
3694 }
3695 
3696 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3697 				block_t old_blkaddr, block_t new_blkaddr,
3698 				bool recover_curseg, bool recover_newaddr,
3699 				bool from_gc)
3700 {
3701 	struct sit_info *sit_i = SIT_I(sbi);
3702 	struct curseg_info *curseg;
3703 	unsigned int segno, old_cursegno;
3704 	struct seg_entry *se;
3705 	int type;
3706 	unsigned short old_blkoff;
3707 	unsigned char old_alloc_type;
3708 
3709 	segno = GET_SEGNO(sbi, new_blkaddr);
3710 	se = get_seg_entry(sbi, segno);
3711 	type = se->type;
3712 
3713 	f2fs_down_write(&SM_I(sbi)->curseg_lock);
3714 
3715 	if (!recover_curseg) {
3716 		/* for recovery flow */
3717 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3718 			if (old_blkaddr == NULL_ADDR)
3719 				type = CURSEG_COLD_DATA;
3720 			else
3721 				type = CURSEG_WARM_DATA;
3722 		}
3723 	} else {
3724 		if (IS_CURSEG(sbi, segno)) {
3725 			/* se->type is volatile due to SSR allocation */
3726 			type = __f2fs_get_curseg(sbi, segno);
3727 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3728 		} else {
3729 			type = CURSEG_WARM_DATA;
3730 		}
3731 	}
3732 
3733 	f2fs_bug_on(sbi, !IS_DATASEG(type));
3734 	curseg = CURSEG_I(sbi, type);
3735 
3736 	mutex_lock(&curseg->curseg_mutex);
3737 	down_write(&sit_i->sentry_lock);
3738 
3739 	old_cursegno = curseg->segno;
3740 	old_blkoff = curseg->next_blkoff;
3741 	old_alloc_type = curseg->alloc_type;
3742 
3743 	/* change the current segment */
3744 	if (segno != curseg->segno) {
3745 		curseg->next_segno = segno;
3746 		change_curseg(sbi, type);
3747 	}
3748 
3749 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3750 	curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
3751 
3752 	if (!recover_curseg || recover_newaddr) {
3753 		if (!from_gc)
3754 			update_segment_mtime(sbi, new_blkaddr, 0);
3755 		update_sit_entry(sbi, new_blkaddr, 1);
3756 	}
3757 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3758 		f2fs_invalidate_internal_cache(sbi, old_blkaddr);
3759 		if (!from_gc)
3760 			update_segment_mtime(sbi, old_blkaddr, 0);
3761 		update_sit_entry(sbi, old_blkaddr, -1);
3762 	}
3763 
3764 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3765 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3766 
3767 	locate_dirty_segment(sbi, old_cursegno);
3768 
3769 	if (recover_curseg) {
3770 		if (old_cursegno != curseg->segno) {
3771 			curseg->next_segno = old_cursegno;
3772 			change_curseg(sbi, type);
3773 		}
3774 		curseg->next_blkoff = old_blkoff;
3775 		curseg->alloc_type = old_alloc_type;
3776 	}
3777 
3778 	up_write(&sit_i->sentry_lock);
3779 	mutex_unlock(&curseg->curseg_mutex);
3780 	f2fs_up_write(&SM_I(sbi)->curseg_lock);
3781 }
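
/*
 * Behaviour summary of f2fs_do_replace_block(), derived from the code
 * above:
 *
 *	recover_curseg	recover_newaddr	SIT effect on new_blkaddr
 *	false		any		marked valid (+1)
 *	true		true		marked valid (+1)
 *	true		false		summary entry updated only
 *
 * With recover_curseg == true, the original curseg position (segno,
 * next_blkoff, alloc_type) is restored before returning.
 */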
3782 
3783 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3784 				block_t old_addr, block_t new_addr,
3785 				unsigned char version, bool recover_curseg,
3786 				bool recover_newaddr)
3787 {
3788 	struct f2fs_summary sum;
3789 
3790 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3791 
3792 	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3793 					recover_curseg, recover_newaddr, false);
3794 
3795 	f2fs_update_data_blkaddr(dn, new_addr);
3796 }
3797 
3798 void f2fs_wait_on_page_writeback(struct page *page,
3799 				enum page_type type, bool ordered, bool locked)
3800 {
3801 	if (PageWriteback(page)) {
3802 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3803 
3804 		/* submit cached LFS IO */
3805 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3806 		/* submit cached IPU IO */
3807 		f2fs_submit_merged_ipu_write(sbi, NULL, page);
3808 		if (ordered) {
3809 			wait_on_page_writeback(page);
3810 			f2fs_bug_on(sbi, locked && PageWriteback(page));
3811 		} else {
3812 			wait_for_stable_page(page);
3813 		}
3814 	}
3815 }
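
/*
 * Typical call pattern (hypothetical caller): flush whatever f2fs still
 * holds in its merged write queues for this page, then wait before
 * modifying the page in place:
 *
 *	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
 *	// with ordered == true the page is guaranteed not to be under
 *	// writeback on return, so it may be redirtied safely.
 */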
3816 
3817 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3818 {
3819 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3820 	struct page *cpage;
3821 
3822 	if (!f2fs_post_read_required(inode))
3823 		return;
3824 
3825 	if (!__is_valid_data_blkaddr(blkaddr))
3826 		return;
3827 
3828 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3829 	if (cpage) {
3830 		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3831 		f2fs_put_page(cpage, 1);
3832 	}
3833 }
3834 
3835 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3836 								block_t len)
3837 {
3838 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3839 	block_t i;
3840 
3841 	if (!f2fs_post_read_required(inode))
3842 		return;
3843 
3844 	for (i = 0; i < len; i++)
3845 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
3846 
3847 	invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1);
3848 }
3849 
3850 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3851 {
3852 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3853 	struct curseg_info *seg_i;
3854 	unsigned char *kaddr;
3855 	struct page *page;
3856 	block_t start;
3857 	int i, j, offset;
3858 
3859 	start = start_sum_block(sbi);
3860 
3861 	page = f2fs_get_meta_page(sbi, start++);
3862 	if (IS_ERR(page))
3863 		return PTR_ERR(page);
3864 	kaddr = (unsigned char *)page_address(page);
3865 
3866 	/* Step 1: restore nat cache */
3867 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3868 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3869 
3870 	/* Step 2: restore sit cache */
3871 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3872 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3873 	offset = 2 * SUM_JOURNAL_SIZE;
3874 
3875 	/* Step 3: restore summary entries */
3876 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3877 		unsigned short blk_off;
3878 		unsigned int segno;
3879 
3880 		seg_i = CURSEG_I(sbi, i);
3881 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3882 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3883 		seg_i->next_segno = segno;
3884 		reset_curseg(sbi, i, 0);
3885 		seg_i->alloc_type = ckpt->alloc_type[i];
3886 		seg_i->next_blkoff = blk_off;
3887 
3888 		if (seg_i->alloc_type == SSR)
3889 			blk_off = sbi->blocks_per_seg;
3890 
3891 		for (j = 0; j < blk_off; j++) {
3892 			struct f2fs_summary *s;
3893 
3894 			s = (struct f2fs_summary *)(kaddr + offset);
3895 			seg_i->sum_blk->entries[j] = *s;
3896 			offset += SUMMARY_SIZE;
3897 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3898 						SUM_FOOTER_SIZE)
3899 				continue;
3900 
3901 			f2fs_put_page(page, 1);
3902 			page = NULL;
3903 
3904 			page = f2fs_get_meta_page(sbi, start++);
3905 			if (IS_ERR(page))
3906 				return PTR_ERR(page);
3907 			kaddr = (unsigned char *)page_address(page);
3908 			offset = 0;
3909 		}
3910 	}
3911 	f2fs_put_page(page, 1);
3912 	return 0;
3913 }
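
/*
 * On-disk layout walked by read_compacted_summaries(), spanning one or
 * more meta pages starting at start_sum_block():
 *
 *	+---------------------------+ offset 0
 *	| NAT journal               | SUM_JOURNAL_SIZE bytes
 *	+---------------------------+
 *	| SIT journal               | SUM_JOURNAL_SIZE bytes
 *	+---------------------------+
 *	| packed f2fs_summary       | entries for the HOT/WARM/COLD data
 *	| entries ...               | cursegs, spilling into further pages;
 *	+---------------------------+ each page reserves SUM_FOOTER_SIZE
 *	                              bytes at its end
 */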
3914 
3915 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3916 {
3917 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3918 	struct f2fs_summary_block *sum;
3919 	struct curseg_info *curseg;
3920 	struct page *new;
3921 	unsigned short blk_off;
3922 	unsigned int segno = 0;
3923 	block_t blk_addr = 0;
3924 	int err = 0;
3925 
3926 	/* get segment number and block addr */
3927 	if (IS_DATASEG(type)) {
3928 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3929 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3930 							CURSEG_HOT_DATA]);
3931 		if (__exist_node_summaries(sbi))
3932 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3933 		else
3934 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3935 	} else {
3936 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
3937 							CURSEG_HOT_NODE]);
3938 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3939 							CURSEG_HOT_NODE]);
3940 		if (__exist_node_summaries(sbi))
3941 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3942 							type - CURSEG_HOT_NODE);
3943 		else
3944 			blk_addr = GET_SUM_BLOCK(sbi, segno);
3945 	}
3946 
3947 	new = f2fs_get_meta_page(sbi, blk_addr);
3948 	if (IS_ERR(new))
3949 		return PTR_ERR(new);
3950 	sum = (struct f2fs_summary_block *)page_address(new);
3951 
3952 	if (IS_NODESEG(type)) {
3953 		if (__exist_node_summaries(sbi)) {
3954 			struct f2fs_summary *ns = &sum->entries[0];
3955 			int i;
3956 
3957 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3958 				ns->version = 0;
3959 				ns->ofs_in_node = 0;
3960 			}
3961 		} else {
3962 			err = f2fs_restore_node_summary(sbi, segno, sum);
3963 			if (err)
3964 				goto out;
3965 		}
3966 	}
3967 
3968 	/* set the incomplete segment as the current segment */
3969 	curseg = CURSEG_I(sbi, type);
3970 	mutex_lock(&curseg->curseg_mutex);
3971 
3972 	/* update journal info */
3973 	down_write(&curseg->journal_rwsem);
3974 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3975 	up_write(&curseg->journal_rwsem);
3976 
3977 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3978 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3979 	curseg->next_segno = segno;
3980 	reset_curseg(sbi, type, 0);
3981 	curseg->alloc_type = ckpt->alloc_type[type];
3982 	curseg->next_blkoff = blk_off;
3983 	mutex_unlock(&curseg->curseg_mutex);
3984 out:
3985 	f2fs_put_page(new, 1);
3986 	return err;
3987 }
3988 
3989 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3990 {
3991 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3992 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3993 	int type = CURSEG_HOT_DATA;
3994 	int err;
3995 
3996 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3997 		int npages = f2fs_npages_for_summary_flush(sbi, true);
3998 
3999 		if (npages >= 2)
4000 			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
4001 							META_CP, true);
4002 
4003 		/* restore the compacted data summaries */
4004 		err = read_compacted_summaries(sbi);
4005 		if (err)
4006 			return err;
4007 		type = CURSEG_HOT_NODE;
4008 	}
4009 
4010 	if (__exist_node_summaries(sbi))
4011 		f2fs_ra_meta_pages(sbi,
4012 				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4013 				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
4014 
4015 	for (; type <= CURSEG_COLD_NODE; type++) {
4016 		err = read_normal_summaries(sbi, type);
4017 		if (err)
4018 			return err;
4019 	}
4020 
4021 	/* sanity check for summary blocks */
4022 	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
4023 			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
4024 		f2fs_err(sbi, "invalid journal entries nats %u sits %u",
4025 			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
4026 		return -EINVAL;
4027 	}
4028 
4029 	return 0;
4030 }
4031 
4032 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4033 {
4034 	struct page *page;
4035 	unsigned char *kaddr;
4036 	struct f2fs_summary *summary;
4037 	struct curseg_info *seg_i;
4038 	int written_size = 0;
4039 	int i, j;
4040 
4041 	page = f2fs_grab_meta_page(sbi, blkaddr++);
4042 	kaddr = (unsigned char *)page_address(page);
4043 	memset(kaddr, 0, PAGE_SIZE);
4044 
4045 	/* Step 1: write nat cache */
4046 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4047 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
4048 	written_size += SUM_JOURNAL_SIZE;
4049 
4050 	/* Step 2: write sit cache */
4051 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4052 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
4053 	written_size += SUM_JOURNAL_SIZE;
4054 
4055 	/* Step 3: write summary entries */
4056 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4057 		seg_i = CURSEG_I(sbi, i);
4058 		for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
4059 			if (!page) {
4060 				page = f2fs_grab_meta_page(sbi, blkaddr++);
4061 				kaddr = (unsigned char *)page_address(page);
4062 				memset(kaddr, 0, PAGE_SIZE);
4063 				written_size = 0;
4064 			}
4065 			summary = (struct f2fs_summary *)(kaddr + written_size);
4066 			*summary = seg_i->sum_blk->entries[j];
4067 			written_size += SUMMARY_SIZE;
4068 
4069 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
4070 							SUM_FOOTER_SIZE)
4071 				continue;
4072 
4073 			set_page_dirty(page);
4074 			f2fs_put_page(page, 1);
4075 			page = NULL;
4076 		}
4077 	}
4078 	if (page) {
4079 		set_page_dirty(page);
4080 		f2fs_put_page(page, 1);
4081 	}
4082 }
4083 
4084 static void write_normal_summaries(struct f2fs_sb_info *sbi,
4085 					block_t blkaddr, int type)
4086 {
4087 	int i, end;
4088 
4089 	if (IS_DATASEG(type))
4090 		end = type + NR_CURSEG_DATA_TYPE;
4091 	else
4092 		end = type + NR_CURSEG_NODE_TYPE;
4093 
4094 	for (i = type; i < end; i++)
4095 		write_current_sum_page(sbi, i, blkaddr + (i - type));
4096 }
4097 
4098 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4099 {
4100 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4101 		write_compacted_summaries(sbi, start_blk);
4102 	else
4103 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4104 }
4105 
4106 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4107 {
4108 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4109 }
4110 
4111 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
4112 					unsigned int val, int alloc)
4113 {
4114 	int i;
4115 
4116 	if (type == NAT_JOURNAL) {
4117 		for (i = 0; i < nats_in_cursum(journal); i++) {
4118 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4119 				return i;
4120 		}
4121 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
4122 			return update_nats_in_cursum(journal, 1);
4123 	} else if (type == SIT_JOURNAL) {
4124 		for (i = 0; i < sits_in_cursum(journal); i++)
4125 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4126 				return i;
4127 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
4128 			return update_sits_in_cursum(journal, 1);
4129 	}
4130 	return -1;
4131 }
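
/*
 * Usage sketch (hypothetical caller): look up a NAT journal slot,
 * allocating one if it is absent and the journal still has room:
 *
 *	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 1);
 *	if (i >= 0)
 *		raw_ne = nat_in_journal(journal, i);  // hit or fresh slot
 *	else
 *		...;  // journal full: fall back to the NAT page itself
 */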
4132 
4133 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4134 					unsigned int segno)
4135 {
4136 	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4137 }
4138 
4139 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4140 					unsigned int start)
4141 {
4142 	struct sit_info *sit_i = SIT_I(sbi);
4143 	struct page *page;
4144 	pgoff_t src_off, dst_off;
4145 
4146 	src_off = current_sit_addr(sbi, start);
4147 	dst_off = next_sit_addr(sbi, src_off);
4148 
4149 	page = f2fs_grab_meta_page(sbi, dst_off);
4150 	seg_info_to_sit_page(sbi, page, start);
4151 
4152 	set_page_dirty(page);
4153 	set_to_next_sit(sit_i, start);
4154 
4155 	return page;
4156 }
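
/*
 * Note: SIT blocks are double-buffered on disk. next_sit_addr() returns
 * the address in the inactive copy, and set_to_next_sit() flips the SIT
 * bitmap bit so that the next checkpoint makes that copy current.
 */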
4157 
4158 static struct sit_entry_set *grab_sit_entry_set(void)
4159 {
4160 	struct sit_entry_set *ses =
4161 			f2fs_kmem_cache_alloc(sit_entry_set_slab,
4162 						GFP_NOFS, true, NULL);
4163 
4164 	ses->entry_cnt = 0;
4165 	INIT_LIST_HEAD(&ses->set_list);
4166 	return ses;
4167 }
4168 
4169 static void release_sit_entry_set(struct sit_entry_set *ses)
4170 {
4171 	list_del(&ses->set_list);
4172 	kmem_cache_free(sit_entry_set_slab, ses);
4173 }
4174 
4175 static void adjust_sit_entry_set(struct sit_entry_set *ses,
4176 						struct list_head *head)
4177 {
4178 	struct sit_entry_set *next = ses;
4179 
4180 	if (list_is_last(&ses->set_list, head))
4181 		return;
4182 
4183 	list_for_each_entry_continue(next, head, set_list)
4184 		if (ses->entry_cnt <= next->entry_cnt) {
4185 			list_move_tail(&ses->set_list, &next->set_list);
4186 			return;
4187 		}
4188 
4189 	list_move_tail(&ses->set_list, head);
4190 }
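
/*
 * The helpers above keep the sit_entry_set list sorted by ascending
 * entry_cnt, e.g. counts [1, 3, 3, 7]: bumping the first 3 to 4 moves
 * that set behind the remaining 3, giving [1, 3, 4, 7]. This lets
 * f2fs_flush_sit_entries() drain the smallest sets into the journal
 * first and fall back to SIT pages only once a set no longer fits.
 */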
4191 
4192 static void add_sit_entry(unsigned int segno, struct list_head *head)
4193 {
4194 	struct sit_entry_set *ses;
4195 	unsigned int start_segno = START_SEGNO(segno);
4196 
4197 	list_for_each_entry(ses, head, set_list) {
4198 		if (ses->start_segno == start_segno) {
4199 			ses->entry_cnt++;
4200 			adjust_sit_entry_set(ses, head);
4201 			return;
4202 		}
4203 	}
4204 
4205 	ses = grab_sit_entry_set();
4206 
4207 	ses->start_segno = start_segno;
4208 	ses->entry_cnt++;
4209 	list_add(&ses->set_list, head);
4210 }
4211 
4212 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4213 {
4214 	struct f2fs_sm_info *sm_info = SM_I(sbi);
4215 	struct list_head *set_list = &sm_info->sit_entry_set;
4216 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4217 	unsigned int segno;
4218 
4219 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4220 		add_sit_entry(segno, set_list);
4221 }
4222 
4223 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4224 {
4225 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4226 	struct f2fs_journal *journal = curseg->journal;
4227 	int i;
4228 
4229 	down_write(&curseg->journal_rwsem);
4230 	for (i = 0; i < sits_in_cursum(journal); i++) {
4231 		unsigned int segno;
4232 		bool dirtied;
4233 
4234 		segno = le32_to_cpu(segno_in_journal(journal, i));
4235 		dirtied = __mark_sit_entry_dirty(sbi, segno);
4236 
4237 		if (!dirtied)
4238 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4239 	}
4240 	update_sits_in_cursum(journal, -i);
4241 	up_write(&curseg->journal_rwsem);
4242 }
4243 
4244 /*
4245  * CP calls this function, which flushes SIT entries including sit_journal,
4246  * and moves prefree segs to free segs.
4247  */
4248 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4249 {
4250 	struct sit_info *sit_i = SIT_I(sbi);
4251 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4252 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4253 	struct f2fs_journal *journal = curseg->journal;
4254 	struct sit_entry_set *ses, *tmp;
4255 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
4256 	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4257 	struct seg_entry *se;
4258 
4259 	down_write(&sit_i->sentry_lock);
4260 
4261 	if (!sit_i->dirty_sentries)
4262 		goto out;
4263 
4264 	/*
4265 	 * temporarily add and account the sit entries of the dirty
4266 	 * bitmap in the sit entry set
4267 	 */
4268 	add_sits_in_set(sbi);
4269 
4270 	/*
4271 	 * if there is not enough space in the journal to store dirty sit
4272 	 * entries, remove all entries from the journal, then add and
4273 	 * account them in the sit entry set.
4274 	 */
4275 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4276 								!to_journal)
4277 		remove_sits_in_journal(sbi);
4278 
4279 	/*
4280 	 * there are two steps to flush sit entries:
4281 	 * #1, flush sit entries to the journal in the current cold data summary block.
4282 	 * #2, flush sit entries to the sit page.
4283 	 */
4284 	list_for_each_entry_safe(ses, tmp, head, set_list) {
4285 		struct page *page = NULL;
4286 		struct f2fs_sit_block *raw_sit = NULL;
4287 		unsigned int start_segno = ses->start_segno;
4288 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4289 						(unsigned long)MAIN_SEGS(sbi));
4290 		unsigned int segno = start_segno;
4291 
4292 		if (to_journal &&
4293 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4294 			to_journal = false;
4295 
4296 		if (to_journal) {
4297 			down_write(&curseg->journal_rwsem);
4298 		} else {
4299 			page = get_next_sit_page(sbi, start_segno);
4300 			raw_sit = page_address(page);
4301 		}
4302 
4303 		/* flush dirty sit entries in region of current sit set */
4304 		for_each_set_bit_from(segno, bitmap, end) {
4305 			int offset, sit_offset;
4306 
4307 			se = get_seg_entry(sbi, segno);
4308 #ifdef CONFIG_F2FS_CHECK_FS
4309 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4310 						SIT_VBLOCK_MAP_SIZE))
4311 				f2fs_bug_on(sbi, 1);
4312 #endif
4313 
4314 			/* add discard candidates */
4315 			if (!(cpc->reason & CP_DISCARD)) {
4316 				cpc->trim_start = segno;
4317 				add_discard_addrs(sbi, cpc, false);
4318 			}
4319 
4320 			if (to_journal) {
4321 				offset = f2fs_lookup_journal_in_cursum(journal,
4322 							SIT_JOURNAL, segno, 1);
4323 				f2fs_bug_on(sbi, offset < 0);
4324 				segno_in_journal(journal, offset) =
4325 							cpu_to_le32(segno);
4326 				seg_info_to_raw_sit(se,
4327 					&sit_in_journal(journal, offset));
4328 				check_block_count(sbi, segno,
4329 					&sit_in_journal(journal, offset));
4330 			} else {
4331 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4332 				seg_info_to_raw_sit(se,
4333 						&raw_sit->entries[sit_offset]);
4334 				check_block_count(sbi, segno,
4335 						&raw_sit->entries[sit_offset]);
4336 			}
4337 
4338 			__clear_bit(segno, bitmap);
4339 			sit_i->dirty_sentries--;
4340 			ses->entry_cnt--;
4341 		}
4342 
4343 		if (to_journal)
4344 			up_write(&curseg->journal_rwsem);
4345 		else
4346 			f2fs_put_page(page, 1);
4347 
4348 		f2fs_bug_on(sbi, ses->entry_cnt);
4349 		release_sit_entry_set(ses);
4350 	}
4351 
4352 	f2fs_bug_on(sbi, !list_empty(head));
4353 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
4354 out:
4355 	if (cpc->reason & CP_DISCARD) {
4356 		__u64 trim_start = cpc->trim_start;
4357 
4358 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4359 			add_discard_addrs(sbi, cpc, false);
4360 
4361 		cpc->trim_start = trim_start;
4362 	}
4363 	up_write(&sit_i->sentry_lock);
4364 
4365 	set_prefree_as_free_segments(sbi);
4366 }
4367 
4368 static int build_sit_info(struct f2fs_sb_info *sbi)
4369 {
4370 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4371 	struct sit_info *sit_i;
4372 	unsigned int sit_segs, start;
4373 	char *src_bitmap, *bitmap;
4374 	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4375 	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4376 
4377 	/* allocate memory for SIT information */
4378 	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4379 	if (!sit_i)
4380 		return -ENOMEM;
4381 
4382 	SM_I(sbi)->sit_info = sit_i;
4383 
4384 	sit_i->sentries =
4385 		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4386 					      MAIN_SEGS(sbi)),
4387 			      GFP_KERNEL);
4388 	if (!sit_i->sentries)
4389 		return -ENOMEM;
4390 
4391 	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4392 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4393 								GFP_KERNEL);
4394 	if (!sit_i->dirty_sentries_bitmap)
4395 		return -ENOMEM;
4396 
4397 #ifdef CONFIG_F2FS_CHECK_FS
4398 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4399 #else
4400 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4401 #endif
4402 	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4403 	if (!sit_i->bitmap)
4404 		return -ENOMEM;
4405 
4406 	bitmap = sit_i->bitmap;
4407 
4408 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4409 		sit_i->sentries[start].cur_valid_map = bitmap;
4410 		bitmap += SIT_VBLOCK_MAP_SIZE;
4411 
4412 		sit_i->sentries[start].ckpt_valid_map = bitmap;
4413 		bitmap += SIT_VBLOCK_MAP_SIZE;
4414 
4415 #ifdef CONFIG_F2FS_CHECK_FS
4416 		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4417 		bitmap += SIT_VBLOCK_MAP_SIZE;
4418 #endif
4419 
4420 		if (discard_map) {
4421 			sit_i->sentries[start].discard_map = bitmap;
4422 			bitmap += SIT_VBLOCK_MAP_SIZE;
4423 		}
4424 	}
4425 
4426 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4427 	if (!sit_i->tmp_map)
4428 		return -ENOMEM;
4429 
4430 	if (__is_large_section(sbi)) {
4431 		sit_i->sec_entries =
4432 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4433 						      MAIN_SECS(sbi)),
4434 				      GFP_KERNEL);
4435 		if (!sit_i->sec_entries)
4436 			return -ENOMEM;
4437 	}
4438 
4439 	/* get information related to SIT */
4440 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4441 
4442 	/* setup SIT bitmap from checkpoint pack */
4443 	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4444 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4445 
4446 	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4447 	if (!sit_i->sit_bitmap)
4448 		return -ENOMEM;
4449 
4450 #ifdef CONFIG_F2FS_CHECK_FS
4451 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4452 					sit_bitmap_size, GFP_KERNEL);
4453 	if (!sit_i->sit_bitmap_mir)
4454 		return -ENOMEM;
4455 
4456 	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4457 					main_bitmap_size, GFP_KERNEL);
4458 	if (!sit_i->invalid_segmap)
4459 		return -ENOMEM;
4460 #endif
4461 
4462 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4463 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4464 	sit_i->written_valid_blocks = 0;
4465 	sit_i->bitmap_size = sit_bitmap_size;
4466 	sit_i->dirty_sentries = 0;
4467 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4468 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4469 	sit_i->mounted_time = ktime_get_boottime_seconds();
4470 	init_rwsem(&sit_i->sentry_lock);
4471 	return 0;
4472 }
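
/*
 * Layout of the single sit_i->bitmap allocation initialized above,
 * repeated once per segment (the mirror exists only under
 * CONFIG_F2FS_CHECK_FS; discard_map only with block-unit discard):
 *
 *	| cur_valid_map | ckpt_valid_map | [cur_valid_map_mir] | [discard_map] |
 *
 * each piece being SIT_VBLOCK_MAP_SIZE bytes.
 */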
4473 
4474 static int build_free_segmap(struct f2fs_sb_info *sbi)
4475 {
4476 	struct free_segmap_info *free_i;
4477 	unsigned int bitmap_size, sec_bitmap_size;
4478 
4479 	/* allocate memory for free segmap information */
4480 	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4481 	if (!free_i)
4482 		return -ENOMEM;
4483 
4484 	SM_I(sbi)->free_info = free_i;
4485 
4486 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4487 	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4488 	if (!free_i->free_segmap)
4489 		return -ENOMEM;
4490 
4491 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4492 	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4493 	if (!free_i->free_secmap)
4494 		return -ENOMEM;
4495 
4496 	/* set all segments as dirty temporarily */
4497 	memset(free_i->free_segmap, 0xff, bitmap_size);
4498 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4499 
4500 	/* init free segmap information */
4501 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4502 	free_i->free_segments = 0;
4503 	free_i->free_sections = 0;
4504 	spin_lock_init(&free_i->segmap_lock);
4505 	return 0;
4506 }
4507 
4508 static int build_curseg(struct f2fs_sb_info *sbi)
4509 {
4510 	struct curseg_info *array;
4511 	int i;
4512 
4513 	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4514 					sizeof(*array)), GFP_KERNEL);
4515 	if (!array)
4516 		return -ENOMEM;
4517 
4518 	SM_I(sbi)->curseg_array = array;
4519 
4520 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4521 		mutex_init(&array[i].curseg_mutex);
4522 		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4523 		if (!array[i].sum_blk)
4524 			return -ENOMEM;
4525 		init_rwsem(&array[i].journal_rwsem);
4526 		array[i].journal = f2fs_kzalloc(sbi,
4527 				sizeof(struct f2fs_journal), GFP_KERNEL);
4528 		if (!array[i].journal)
4529 			return -ENOMEM;
4530 		if (i < NR_PERSISTENT_LOG)
4531 			array[i].seg_type = CURSEG_HOT_DATA + i;
4532 		else if (i == CURSEG_COLD_DATA_PINNED)
4533 			array[i].seg_type = CURSEG_COLD_DATA;
4534 		else if (i == CURSEG_ALL_DATA_ATGC)
4535 			array[i].seg_type = CURSEG_COLD_DATA;
4536 		array[i].segno = NULL_SEGNO;
4537 		array[i].next_blkoff = 0;
4538 		array[i].inited = false;
4539 	}
4540 	return restore_curseg_summaries(sbi);
4541 }
4542 
4543 static int build_sit_entries(struct f2fs_sb_info *sbi)
4544 {
4545 	struct sit_info *sit_i = SIT_I(sbi);
4546 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4547 	struct f2fs_journal *journal = curseg->journal;
4548 	struct seg_entry *se;
4549 	struct f2fs_sit_entry sit;
4550 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4551 	unsigned int i, start, end;
4552 	unsigned int readed, start_blk = 0;
4553 	int err = 0;
4554 	block_t sit_valid_blocks[2] = {0, 0};
4555 
4556 	do {
4557 		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4558 							META_SIT, true);
4559 
4560 		start = start_blk * sit_i->sents_per_block;
4561 		end = (start_blk + readed) * sit_i->sents_per_block;
4562 
4563 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
4564 			struct f2fs_sit_block *sit_blk;
4565 			struct page *page;
4566 
4567 			se = &sit_i->sentries[start];
4568 			page = get_current_sit_page(sbi, start);
4569 			if (IS_ERR(page))
4570 				return PTR_ERR(page);
4571 			sit_blk = (struct f2fs_sit_block *)page_address(page);
4572 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4573 			f2fs_put_page(page, 1);
4574 
4575 			err = check_block_count(sbi, start, &sit);
4576 			if (err)
4577 				return err;
4578 			seg_info_from_raw_sit(se, &sit);
4579 
4580 			if (se->type >= NR_PERSISTENT_LOG) {
4581 				f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4582 							se->type, start);
4583 				f2fs_handle_error(sbi,
4584 						ERROR_INCONSISTENT_SUM_TYPE);
4585 				return -EFSCORRUPTED;
4586 			}
4587 
4588 			sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4589 
4590 			if (f2fs_block_unit_discard(sbi)) {
4591 				/* build discard map only one time */
4592 				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4593 					memset(se->discard_map, 0xff,
4594 						SIT_VBLOCK_MAP_SIZE);
4595 				} else {
4596 					memcpy(se->discard_map,
4597 						se->cur_valid_map,
4598 						SIT_VBLOCK_MAP_SIZE);
4599 					sbi->discard_blks +=
4600 						sbi->blocks_per_seg -
4601 						se->valid_blocks;
4602 				}
4603 			}
4604 
4605 			if (__is_large_section(sbi))
4606 				get_sec_entry(sbi, start)->valid_blocks +=
4607 							se->valid_blocks;
4608 		}
4609 		start_blk += readed;
4610 	} while (start_blk < sit_blk_cnt);
4611 
4612 	down_read(&curseg->journal_rwsem);
4613 	for (i = 0; i < sits_in_cursum(journal); i++) {
4614 		unsigned int old_valid_blocks;
4615 
4616 		start = le32_to_cpu(segno_in_journal(journal, i));
4617 		if (start >= MAIN_SEGS(sbi)) {
4618 			f2fs_err(sbi, "Wrong journal entry on segno %u",
4619 				 start);
4620 			err = -EFSCORRUPTED;
4621 			f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
4622 			break;
4623 		}
4624 
4625 		se = &sit_i->sentries[start];
4626 		sit = sit_in_journal(journal, i);
4627 
4628 		old_valid_blocks = se->valid_blocks;
4629 
4630 		sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
4631 
4632 		err = check_block_count(sbi, start, &sit);
4633 		if (err)
4634 			break;
4635 		seg_info_from_raw_sit(se, &sit);
4636 
4637 		if (se->type >= NR_PERSISTENT_LOG) {
4638 			f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4639 							se->type, start);
4640 			err = -EFSCORRUPTED;
4641 			f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
4642 			break;
4643 		}
4644 
4645 		sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4646 
4647 		if (f2fs_block_unit_discard(sbi)) {
4648 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4649 				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4650 			} else {
4651 				memcpy(se->discard_map, se->cur_valid_map,
4652 							SIT_VBLOCK_MAP_SIZE);
4653 				sbi->discard_blks += old_valid_blocks;
4654 				sbi->discard_blks -= se->valid_blocks;
4655 			}
4656 		}
4657 
4658 		if (__is_large_section(sbi)) {
4659 			get_sec_entry(sbi, start)->valid_blocks +=
4660 							se->valid_blocks;
4661 			get_sec_entry(sbi, start)->valid_blocks -=
4662 							old_valid_blocks;
4663 		}
4664 	}
4665 	up_read(&curseg->journal_rwsem);
4666 
4667 	if (err)
4668 		return err;
4669 
4670 	if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
4671 		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4672 			 sit_valid_blocks[NODE], valid_node_count(sbi));
4673 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
4674 		return -EFSCORRUPTED;
4675 	}
4676 
4677 	if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
4678 				valid_user_blocks(sbi)) {
4679 		f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
4680 			 sit_valid_blocks[DATA], sit_valid_blocks[NODE],
4681 			 valid_user_blocks(sbi));
4682 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
4683 		return -EFSCORRUPTED;
4684 	}
4685 
4686 	return 0;
4687 }
4688 
4689 static void init_free_segmap(struct f2fs_sb_info *sbi)
4690 {
4691 	unsigned int start;
4692 	int type;
4693 	struct seg_entry *sentry;
4694 
4695 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4696 		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4697 			continue;
4698 		sentry = get_seg_entry(sbi, start);
4699 		if (!sentry->valid_blocks)
4700 			__set_free(sbi, start);
4701 		else
4702 			SIT_I(sbi)->written_valid_blocks +=
4703 						sentry->valid_blocks;
4704 	}
4705 
4706 	/* mark the current segments as in use */
4707 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4708 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4709 
4710 		__set_test_and_inuse(sbi, curseg_t->segno);
4711 	}
4712 }
4713 
4714 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4715 {
4716 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4717 	struct free_segmap_info *free_i = FREE_I(sbi);
4718 	unsigned int segno = 0, offset = 0, secno;
4719 	block_t valid_blocks, usable_blks_in_seg;
4720 
4721 	while (1) {
4722 		/* find dirty segment based on free segmap */
4723 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4724 		if (segno >= MAIN_SEGS(sbi))
4725 			break;
4726 		offset = segno + 1;
4727 		valid_blocks = get_valid_blocks(sbi, segno, false);
4728 		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4729 		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4730 			continue;
4731 		if (valid_blocks > usable_blks_in_seg) {
4732 			f2fs_bug_on(sbi, 1);
4733 			continue;
4734 		}
4735 		mutex_lock(&dirty_i->seglist_lock);
4736 		__locate_dirty_segment(sbi, segno, DIRTY);
4737 		mutex_unlock(&dirty_i->seglist_lock);
4738 	}
4739 
4740 	if (!__is_large_section(sbi))
4741 		return;
4742 
4743 	mutex_lock(&dirty_i->seglist_lock);
4744 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4745 		valid_blocks = get_valid_blocks(sbi, segno, true);
4746 		secno = GET_SEC_FROM_SEG(sbi, segno);
4747 
4748 		if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
4749 			continue;
4750 		if (IS_CURSEC(sbi, secno))
4751 			continue;
4752 		set_bit(secno, dirty_i->dirty_secmap);
4753 	}
4754 	mutex_unlock(&dirty_i->seglist_lock);
4755 }
4756 
4757 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4758 {
4759 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4760 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4761 
4762 	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4763 	if (!dirty_i->victim_secmap)
4764 		return -ENOMEM;
4765 
4766 	dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4767 	if (!dirty_i->pinned_secmap)
4768 		return -ENOMEM;
4769 
4770 	dirty_i->pinned_secmap_cnt = 0;
4771 	dirty_i->enable_pin_section = true;
4772 	return 0;
4773 }
4774 
4775 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4776 {
4777 	struct dirty_seglist_info *dirty_i;
4778 	unsigned int bitmap_size, i;
4779 
4780 	/* allocate memory for dirty segments list information */
4781 	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4782 								GFP_KERNEL);
4783 	if (!dirty_i)
4784 		return -ENOMEM;
4785 
4786 	SM_I(sbi)->dirty_info = dirty_i;
4787 	mutex_init(&dirty_i->seglist_lock);
4788 
4789 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4790 
4791 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
4792 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4793 								GFP_KERNEL);
4794 		if (!dirty_i->dirty_segmap[i])
4795 			return -ENOMEM;
4796 	}
4797 
4798 	if (__is_large_section(sbi)) {
4799 		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4800 		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4801 						bitmap_size, GFP_KERNEL);
4802 		if (!dirty_i->dirty_secmap)
4803 			return -ENOMEM;
4804 	}
4805 
4806 	init_dirty_segmap(sbi);
4807 	return init_victim_secmap(sbi);
4808 }
4809 
4810 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4811 {
4812 	int i;
4813 
4814 	/*
4815 	 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4816 	 * In LFS curseg, all blkaddrs after .next_blkoff should be unused.
4817 	 */
4818 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4819 		struct curseg_info *curseg = CURSEG_I(sbi, i);
4820 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4821 		unsigned int blkofs = curseg->next_blkoff;
4822 
4823 		if (f2fs_sb_has_readonly(sbi) &&
4824 			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
4825 			continue;
4826 
4827 		sanity_check_seg_type(sbi, curseg->seg_type);
4828 
4829 		if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
4830 			f2fs_err(sbi,
4831 				 "Current segment has invalid alloc_type:%d",
4832 				 curseg->alloc_type);
4833 			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4834 			return -EFSCORRUPTED;
4835 		}
4836 
4837 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
4838 			goto out;
4839 
4840 		if (curseg->alloc_type == SSR)
4841 			continue;
4842 
4843 		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4844 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4845 				continue;
4846 out:
4847 			f2fs_err(sbi,
4848 				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4849 				 i, curseg->segno, curseg->alloc_type,
4850 				 curseg->next_blkoff, blkofs);
4851 			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4852 			return -EFSCORRUPTED;
4853 		}
4854 	}
4855 	return 0;
4856 }
4857 
4858 #ifdef CONFIG_BLK_DEV_ZONED
4859 
4860 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4861 				    struct f2fs_dev_info *fdev,
4862 				    struct blk_zone *zone)
4863 {
4864 	unsigned int zone_segno;
4865 	block_t zone_block, valid_block_cnt;
4866 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4867 	int ret;
4868 
4869 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4870 		return 0;
4871 
4872 	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4873 	zone_segno = GET_SEGNO(sbi, zone_block);
4874 
4875 	/*
4876 	 * Skip checking zones that cursegs point to, since
4877 	 * fix_curseg_write_pointer() checks them.
4878 	 */
4879 	if (zone_segno >= MAIN_SEGS(sbi) ||
4880 	    IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno)))
4881 		return 0;
4882 
4883 	/*
4884 	 * Get the number of valid blocks in the zone.
4885 	 */
4886 	valid_block_cnt = get_valid_blocks(sbi, zone_segno, true);
4887 
4888 	if ((!valid_block_cnt && zone->cond == BLK_ZONE_COND_EMPTY) ||
4889 	    (valid_block_cnt && zone->cond == BLK_ZONE_COND_FULL))
4890 		return 0;
4891 
4892 	if (!valid_block_cnt) {
4893 		f2fs_notice(sbi, "Zone without valid blocks has a non-zero "
4894 			    "write pointer. Resetting the write pointer: cond[0x%x]",
4895 			    zone->cond);
4896 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4897 					zone->len >> log_sectors_per_block);
4898 		if (ret)
4899 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4900 				 fdev->path, ret);
4901 		return ret;
4902 	}
4903 
4904 	/*
4905 	 * If there are valid blocks and the write pointer doesn't match
4906 	 * them, report the inconsistency and fill the zone to the end to
4907 	 * close it. The inconsistency does not cause write errors because
4908 	 * the zone will not be selected for write operations until it
4909 	 * gets discarded.
4910 	 */
4911 	f2fs_notice(sbi, "Valid blocks are not aligned with write "
4912 		    "pointer: valid block[0x%x,0x%x] cond[0x%x]",
4913 		    zone_segno, valid_block_cnt, zone->cond);
4914 
4915 	ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
4916 				zone->start, zone->len, GFP_NOFS);
4917 	if (ret == -EOPNOTSUPP) {
4918 		ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
4919 					zone->len - (zone->wp - zone->start),
4920 					GFP_NOFS, 0);
4921 		if (ret)
4922 			f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
4923 					fdev->path, ret);
4924 	} else if (ret) {
4925 		f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
4926 				fdev->path, ret);
4927 	}
4928 
4929 	return ret;
4930 }
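
/*
 * Decision table implemented by check_zone_write_pointer():
 *
 *	valid blocks	zone condition		action
 *	0		BLK_ZONE_COND_EMPTY	none (consistent)
 *	> 0		BLK_ZONE_COND_FULL	none (consistent)
 *	0		anything else		reset zone via discard
 *	> 0		anything else		finish the zone, or zero-fill
 *						from the write pointer when
 *						REQ_OP_ZONE_FINISH is not
 *						supported
 */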
4931 
4932 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4933 						  block_t zone_blkaddr)
4934 {
4935 	int i;
4936 
4937 	for (i = 0; i < sbi->s_ndevs; i++) {
4938 		if (!bdev_is_zoned(FDEV(i).bdev))
4939 			continue;
4940 		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4941 				zone_blkaddr <= FDEV(i).end_blk))
4942 			return &FDEV(i);
4943 	}
4944 
4945 	return NULL;
4946 }
4947 
4948 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4949 			      void *data)
4950 {
4951 	memcpy(data, zone, sizeof(struct blk_zone));
4952 	return 0;
4953 }
4954 
4955 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4956 {
4957 	struct curseg_info *cs = CURSEG_I(sbi, type);
4958 	struct f2fs_dev_info *zbd;
4959 	struct blk_zone zone;
4960 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4961 	block_t cs_zone_block, wp_block;
4962 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4963 	sector_t zone_sector;
4964 	int err;
4965 
4966 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4967 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4968 
4969 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4970 	if (!zbd)
4971 		return 0;
4972 
4973 	/* report zone for the sector the curseg points to */
4974 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4975 		<< log_sectors_per_block;
4976 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4977 				  report_one_zone_cb, &zone);
4978 	if (err != 1) {
4979 		f2fs_err(sbi, "Report zone failed: %s (errno=%d)",
4980 			 zbd->path, err);
4981 		return err;
4982 	}
4983 
4984 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4985 		return 0;
4986 
4987 	/*
4988 	 * If the previous mount was safely unmounted, we can reuse the
4989 	 * current segments. Otherwise, allocate new sections.
4990 	 */
4991 	if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4992 		wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4993 		wp_segno = GET_SEGNO(sbi, wp_block);
4994 		wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4995 		wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4996 
4997 		if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4998 				wp_sector_off == 0)
4999 			return 0;
5000 
5001 		f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
5002 			    "curseg[0x%x,0x%x] wp[0x%x,0x%x]", type, cs->segno,
5003 			    cs->next_blkoff, wp_segno, wp_blkoff);
5004 	}
5005 
5006 	/* Allocate a new section if the current one is not empty. */
5007 	if (cs->next_blkoff) {
5008 		unsigned int old_segno = cs->segno, old_blkoff = cs->next_blkoff;
5009 
5010 		f2fs_allocate_new_section(sbi, type, true);
5011 		f2fs_notice(sbi, "Assign new section to curseg[%d]: "
5012 				"[0x%x,0x%x] -> [0x%x,0x%x]",
5013 				type, old_segno, old_blkoff,
5014 				cs->segno, cs->next_blkoff);
5015 	}
5016 
5017 	/* check consistency of the zone the curseg pointed to */
5018 	if (check_zone_write_pointer(sbi, zbd, &zone))
5019 		return -EIO;
5020 
5021 	/* check newly assigned zone */
5022 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5023 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5024 
5025 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
5026 	if (!zbd)
5027 		return 0;
5028 
5029 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
5030 		<< log_sectors_per_block;
5031 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
5032 				  report_one_zone_cb, &zone);
5033 	if (err != 1) {
5034 		f2fs_err(sbi, "Report zone failed: %s (errno=%d)",
5035 			 zbd->path, err);
5036 		return err;
5037 	}
5038 
5039 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5040 		return 0;
5041 
5042 	if (zone.wp != zone.start) {
5043 		f2fs_notice(sbi,
5044 			    "New zone for curseg[%d] is not yet discarded. "
5045 			    "Reset the zone: curseg[0x%x,0x%x]",
5046 			    type, cs->segno, cs->next_blkoff);
5047 		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,	cs_zone_block,
5048 					zone.len >> log_sectors_per_block);
5049 		if (err) {
5050 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5051 				 zbd->path, err);
5052 			return err;
5053 		}
5054 	}
5055 
5056 	return 0;
5057 }
5058 
5059 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5060 {
5061 	int i, ret;
5062 
5063 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
5064 		ret = fix_curseg_write_pointer(sbi, i);
5065 		if (ret)
5066 			return ret;
5067 	}
5068 
5069 	return 0;
5070 }
5071 
5072 struct check_zone_write_pointer_args {
5073 	struct f2fs_sb_info *sbi;
5074 	struct f2fs_dev_info *fdev;
5075 };
5076 
5077 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
5078 				      void *data)
5079 {
5080 	struct check_zone_write_pointer_args *args;
5081 
5082 	args = (struct check_zone_write_pointer_args *)data;
5083 
5084 	return check_zone_write_pointer(args->sbi, args->fdev, zone);
5085 }
5086 
5087 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5088 {
5089 	int i, ret;
5090 	struct check_zone_write_pointer_args args;
5091 
5092 	for (i = 0; i < sbi->s_ndevs; i++) {
5093 		if (!bdev_is_zoned(FDEV(i).bdev))
5094 			continue;
5095 
5096 		args.sbi = sbi;
5097 		args.fdev = &FDEV(i);
5098 		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
5099 					  check_zone_write_pointer_cb, &args);
5100 		if (ret < 0)
5101 			return ret;
5102 	}
5103 
5104 	return 0;
5105 }
5106 
5107 /*
5108  * Return the number of usable blocks in a segment. The number of blocks
5109  * returned is always equal to the number of blocks in a segment for
5110  * segments fully contained within a sequential zone capacity or a
5111  * conventional zone. For segments partially contained in a sequential
5112  * zone capacity, the number of usable blocks up to the zone capacity
5113  * is returned. For segments starting at or beyond the zone capacity, 0 is returned.
5114  */
5115 static inline unsigned int f2fs_usable_zone_blks_in_seg(
5116 			struct f2fs_sb_info *sbi, unsigned int segno)
5117 {
5118 	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
5119 	unsigned int secno;
5120 
5121 	if (!sbi->unusable_blocks_per_sec)
5122 		return sbi->blocks_per_seg;
5123 
5124 	secno = GET_SEC_FROM_SEG(sbi, segno);
5125 	seg_start = START_BLOCK(sbi, segno);
5126 	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5127 	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
5128 
5129 	/*
5130 	 * If the segment starts before the zone capacity boundary and
5131 	 * spans beyond it, the usable blocks run from the segment start
5132 	 * to the zone capacity. If the segment starts at or beyond the
5133 	 * zone capacity, there are no usable blocks.
5134 	 */
5135 	if (seg_start >= sec_cap_blkaddr)
5136 		return 0;
5137 	if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
5138 		return sec_cap_blkaddr - seg_start;
5139 
5140 	return sbi->blocks_per_seg;
5141 }
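
/*
 * Worked example with hypothetical geometry: blocks_per_seg = 512 and a
 * zone whose capacity covers 3.5 segments, i.e. CAP_BLKS_PER_SEC() = 1792:
 *
 *	segs 0-2 of the section: 512 usable blocks each (fully below cap)
 *	seg 3:                   1792 - 3 * 512 = 256 usable blocks
 *	seg 4 onward:            0 (they start at or beyond the capacity)
 */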
5142 #else
5143 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5144 {
5145 	return 0;
5146 }
5147 
5148 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5149 {
5150 	return 0;
5151 }
5152 
5153 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5154 							unsigned int segno)
5155 {
5156 	return 0;
5157 }
5158 
5159 #endif
5160 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5161 					unsigned int segno)
5162 {
5163 	if (f2fs_sb_has_blkzoned(sbi))
5164 		return f2fs_usable_zone_blks_in_seg(sbi, segno);
5165 
5166 	return sbi->blocks_per_seg;
5167 }
5168 
5169 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5170 					unsigned int segno)
5171 {
5172 	if (f2fs_sb_has_blkzoned(sbi))
5173 		return CAP_SEGS_PER_SEC(sbi);
5174 
5175 	return sbi->segs_per_sec;
5176 }
5177 
5178 /*
5179  * Update the min and max modified times for the cost-benefit GC algorithm
5180  */
5181 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5182 {
5183 	struct sit_info *sit_i = SIT_I(sbi);
5184 	unsigned int segno;
5185 
5186 	down_write(&sit_i->sentry_lock);
5187 
5188 	sit_i->min_mtime = ULLONG_MAX;
5189 
5190 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
5191 		unsigned int i;
5192 		unsigned long long mtime = 0;
5193 
5194 		for (i = 0; i < sbi->segs_per_sec; i++)
5195 			mtime += get_seg_entry(sbi, segno + i)->mtime;
5196 
5197 		mtime = div_u64(mtime, sbi->segs_per_sec);
5198 
5199 		if (sit_i->min_mtime > mtime)
5200 			sit_i->min_mtime = mtime;
5201 	}
5202 	sit_i->max_mtime = get_mtime(sbi, false);
5203 	sit_i->dirty_max_mtime = 0;
5204 	up_write(&sit_i->sentry_lock);
5205 }
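
/*
 * The per-section mtime computed above is the arithmetic mean of its
 * segments' mtimes. For example, with segs_per_sec = 4 and segment
 * mtimes {100, 200, 300, 400}, mtime = (100 + 200 + 300 + 400) / 4 =
 * 250, which becomes the new min_mtime if it is the smallest mean so far.
 */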
5206 
5207 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5208 {
5209 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5210 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5211 	struct f2fs_sm_info *sm_info;
5212 	int err;
5213 
5214 	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5215 	if (!sm_info)
5216 		return -ENOMEM;
5217 
5218 	/* init sm info */
5219 	sbi->sm_info = sm_info;
5220 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5221 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5222 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5223 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5224 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5225 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5226 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
5227 	sm_info->rec_prefree_segments = sm_info->main_segments *
5228 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
5229 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
5230 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
5231 
5232 	if (!f2fs_lfs_mode(sbi))
5233 		sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
5234 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5235 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5236 	sm_info->min_seq_blocks = sbi->blocks_per_seg;
5237 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5238 	sm_info->min_ssr_sections = reserved_sections(sbi);
5239 
5240 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
5241 
5242 	init_f2fs_rwsem(&sm_info->curseg_lock);
5243 
5244 	err = f2fs_create_flush_cmd_control(sbi);
5245 	if (err)
5246 		return err;
5247 
5248 	err = create_discard_cmd_control(sbi);
5249 	if (err)
5250 		return err;
5251 
5252 	err = build_sit_info(sbi);
5253 	if (err)
5254 		return err;
5255 	err = build_free_segmap(sbi);
5256 	if (err)
5257 		return err;
5258 	err = build_curseg(sbi);
5259 	if (err)
5260 		return err;
5261 
5262 	/* reinit free segmap based on SIT */
5263 	err = build_sit_entries(sbi);
5264 	if (err)
5265 		return err;
5266 
5267 	init_free_segmap(sbi);
5268 	err = build_dirty_segmap(sbi);
5269 	if (err)
5270 		return err;
5271 
5272 	err = sanity_check_curseg(sbi);
5273 	if (err)
5274 		return err;
5275 
5276 	init_min_max_mtime(sbi);
5277 	return 0;
5278 }
5279 
5280 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5281 		enum dirty_type dirty_type)
5282 {
5283 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5284 
5285 	mutex_lock(&dirty_i->seglist_lock);
5286 	kvfree(dirty_i->dirty_segmap[dirty_type]);
5287 	dirty_i->nr_dirty[dirty_type] = 0;
5288 	mutex_unlock(&dirty_i->seglist_lock);
5289 }
5290 
5291 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5292 {
5293 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5294 
5295 	kvfree(dirty_i->pinned_secmap);
5296 	kvfree(dirty_i->victim_secmap);
5297 }
5298 
5299 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5300 {
5301 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5302 	int i;
5303 
5304 	if (!dirty_i)
5305 		return;
5306 
5307 	/* discard pre-free/dirty segments list */
5308 	for (i = 0; i < NR_DIRTY_TYPE; i++)
5309 		discard_dirty_segmap(sbi, i);
5310 
5311 	if (__is_large_section(sbi)) {
5312 		mutex_lock(&dirty_i->seglist_lock);
5313 		kvfree(dirty_i->dirty_secmap);
5314 		mutex_unlock(&dirty_i->seglist_lock);
5315 	}
5316 
5317 	destroy_victim_secmap(sbi);
5318 	SM_I(sbi)->dirty_info = NULL;
5319 	kfree(dirty_i);
5320 }
5321 
5322 static void destroy_curseg(struct f2fs_sb_info *sbi)
5323 {
5324 	struct curseg_info *array = SM_I(sbi)->curseg_array;
5325 	int i;
5326 
5327 	if (!array)
5328 		return;
5329 	SM_I(sbi)->curseg_array = NULL;
5330 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
5331 		kfree(array[i].sum_blk);
5332 		kfree(array[i].journal);
5333 	}
5334 	kfree(array);
5335 }
5336 
5337 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5338 {
5339 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5340 
5341 	if (!free_i)
5342 		return;
5343 	SM_I(sbi)->free_info = NULL;
5344 	kvfree(free_i->free_segmap);
5345 	kvfree(free_i->free_secmap);
5346 	kfree(free_i);
5347 }
5348 
5349 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5350 {
5351 	struct sit_info *sit_i = SIT_I(sbi);
5352 
5353 	if (!sit_i)
5354 		return;
5355 
5356 	if (sit_i->sentries)
5357 		kvfree(sit_i->bitmap);
5358 	kfree(sit_i->tmp_map);
5359 
5360 	kvfree(sit_i->sentries);
5361 	kvfree(sit_i->sec_entries);
5362 	kvfree(sit_i->dirty_sentries_bitmap);
5363 
5364 	SM_I(sbi)->sit_info = NULL;
5365 	kvfree(sit_i->sit_bitmap);
5366 #ifdef CONFIG_F2FS_CHECK_FS
5367 	kvfree(sit_i->sit_bitmap_mir);
5368 	kvfree(sit_i->invalid_segmap);
5369 #endif
5370 	kfree(sit_i);
5371 }
5372 
5373 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5374 {
5375 	struct f2fs_sm_info *sm_info = SM_I(sbi);
5376 
5377 	if (!sm_info)
5378 		return;
5379 	f2fs_destroy_flush_cmd_control(sbi, true);
5380 	destroy_discard_cmd_control(sbi);
5381 	destroy_dirty_segmap(sbi);
5382 	destroy_curseg(sbi);
5383 	destroy_free_segmap(sbi);
5384 	destroy_sit_info(sbi);
5385 	sbi->sm_info = NULL;
5386 	kfree(sm_info);
5387 }
5388 
5389 int __init f2fs_create_segment_manager_caches(void)
5390 {
5391 	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5392 			sizeof(struct discard_entry));
5393 	if (!discard_entry_slab)
5394 		goto fail;
5395 
5396 	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5397 			sizeof(struct discard_cmd));
5398 	if (!discard_cmd_slab)
5399 		goto destroy_discard_entry;
5400 
5401 	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5402 			sizeof(struct sit_entry_set));
5403 	if (!sit_entry_set_slab)
5404 		goto destroy_discard_cmd;
5405 
5406 	revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
5407 			sizeof(struct revoke_entry));
5408 	if (!revoke_entry_slab)
5409 		goto destroy_sit_entry_set;
5410 	return 0;
5411 
5412 destroy_sit_entry_set:
5413 	kmem_cache_destroy(sit_entry_set_slab);
5414 destroy_discard_cmd:
5415 	kmem_cache_destroy(discard_cmd_slab);
5416 destroy_discard_entry:
5417 	kmem_cache_destroy(discard_entry_slab);
5418 fail:
5419 	return -ENOMEM;
5420 }
5421 
5422 void f2fs_destroy_segment_manager_caches(void)
5423 {
5424 	kmem_cache_destroy(sit_entry_set_slab);
5425 	kmem_cache_destroy(discard_cmd_slab);
5426 	kmem_cache_destroy(discard_entry_slab);
5427 	kmem_cache_destroy(revoke_entry_slab);
5428 }
5429