xref: /linux/fs/f2fs/segment.c (revision f2527d8f566a45fa00ee5abd04d1c9476d4d704f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/segment.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/sched/mm.h>
13 #include <linux/prefetch.h>
14 #include <linux/kthread.h>
15 #include <linux/swap.h>
16 #include <linux/timer.h>
17 #include <linux/freezer.h>
18 #include <linux/sched/signal.h>
19 #include <linux/random.h>
20 
21 #include "f2fs.h"
22 #include "segment.h"
23 #include "node.h"
24 #include "gc.h"
25 #include "iostat.h"
26 #include <trace/events/f2fs.h>
27 
28 #define __reverse_ffz(x) __reverse_ffs(~(x))
29 
30 static struct kmem_cache *discard_entry_slab;
31 static struct kmem_cache *discard_cmd_slab;
32 static struct kmem_cache *sit_entry_set_slab;
33 static struct kmem_cache *revoke_entry_slab;
34 
35 static unsigned long __reverse_ulong(unsigned char *str)
36 {
37 	unsigned long tmp = 0;
38 	int shift = 24, idx = 0;
39 
40 #if BITS_PER_LONG == 64
41 	shift = 56;
42 #endif
43 	while (shift >= 0) {
44 		tmp |= (unsigned long)str[idx++] << shift;
45 		shift -= BITS_PER_BYTE;
46 	}
47 	return tmp;
48 }
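/*
 * For illustration: on a 64-bit machine, after f2fs_set_bit(0, map) the
 * byte array is {0x80, 0x00, ..., 0x00}; __reverse_ulong folds it into
 * tmp = 0x8000000000000000UL, so f2fs bit 0 becomes the MSB of the word
 * and can be found by the MSB-first scans below.
 */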
49 
50 /*
51  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h because
52  * f2fs_set_bit reverses the MSB and LSB within each byte.
53  */
54 static inline unsigned long __reverse_ffs(unsigned long word)
55 {
56 	int num = 0;
57 
58 #if BITS_PER_LONG == 64
59 	if ((word & 0xffffffff00000000UL) == 0)
60 		num += 32;
61 	else
62 		word >>= 32;
63 #endif
64 	if ((word & 0xffff0000) == 0)
65 		num += 16;
66 	else
67 		word >>= 16;
68 
69 	if ((word & 0xff00) == 0)
70 		num += 8;
71 	else
72 		word >>= 8;
73 
74 	if ((word & 0xf0) == 0)
75 		num += 4;
76 	else
77 		word >>= 4;
78 
79 	if ((word & 0xc) == 0)
80 		num += 2;
81 	else
82 		word >>= 2;
83 
84 	if ((word & 0x2) == 0)
85 		num += 1;
86 	return num;
87 }
88 
89 /*
90  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
91  * f2fs_set_bit makes the MSB and LSB reversed within each byte.
92  * @size must be an integral multiple of BITS_PER_LONG.
93  * Example:
94  *                             MSB <--> LSB
95  *   f2fs_set_bit(0, bitmap) => 1000 0000
96  *   f2fs_set_bit(7, bitmap) => 0000 0001
97  */
98 static unsigned long __find_rev_next_bit(const unsigned long *addr,
99 			unsigned long size, unsigned long offset)
100 {
101 	const unsigned long *p = addr + BIT_WORD(offset);
102 	unsigned long result = size;
103 	unsigned long tmp;
104 
105 	if (offset >= size)
106 		return size;
107 
108 	size -= (offset & ~(BITS_PER_LONG - 1));
109 	offset %= BITS_PER_LONG;
110 
111 	while (1) {
112 		if (*p == 0)
113 			goto pass;
114 
115 		tmp = __reverse_ulong((unsigned char *)p);
116 
117 		tmp &= ~0UL >> offset;
118 		if (size < BITS_PER_LONG)
119 			tmp &= (~0UL << (BITS_PER_LONG - size));
120 		if (tmp)
121 			goto found;
122 pass:
123 		if (size <= BITS_PER_LONG)
124 			break;
125 		size -= BITS_PER_LONG;
126 		offset = 0;
127 		p++;
128 	}
129 	return result;
130 found:
131 	return result - size + __reverse_ffs(tmp);
132 }
133 
134 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
135 			unsigned long size, unsigned long offset)
136 {
137 	const unsigned long *p = addr + BIT_WORD(offset);
138 	unsigned long result = size;
139 	unsigned long tmp;
140 
141 	if (offset >= size)
142 		return size;
143 
144 	size -= (offset & ~(BITS_PER_LONG - 1));
145 	offset %= BITS_PER_LONG;
146 
147 	while (1) {
148 		if (*p == ~0UL)
149 			goto pass;
150 
151 		tmp = __reverse_ulong((unsigned char *)p);
152 
153 		if (offset)
154 			tmp |= ~0UL << (BITS_PER_LONG - offset);
155 		if (size < BITS_PER_LONG)
156 			tmp |= ~0UL >> size;
157 		if (tmp != ~0UL)
158 			goto found;
159 pass:
160 		if (size <= BITS_PER_LONG)
161 			break;
162 		size -= BITS_PER_LONG;
163 		offset = 0;
164 		p++;
165 	}
166 	return result;
167 found:
168 	return result - size + __reverse_ffz(tmp);
169 }
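/*
 * A minimal sketch (not used in this file) of how the two helpers above
 * pair up to extract runs of set bits from an f2fs-ordered bitmap; the
 * same pattern appears in add_discard_addrs() further below.
 */
#if 0
static void walk_runs(const unsigned long *map, unsigned long nbits)
{
	unsigned long start, end = -1;

	while (1) {
		start = __find_rev_next_bit(map, nbits, end + 1);
		if (start >= nbits)
			break;
		end = __find_rev_next_zero_bit(map, nbits, start + 1);
		/* [start, end) is one contiguous run of set bits */
	}
}
#endif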
170 
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
172 {
173 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
174 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
175 	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
176 
177 	if (f2fs_lfs_mode(sbi))
178 		return false;
179 	if (sbi->gc_mode == GC_URGENT_HIGH)
180 		return true;
181 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
182 		return true;
183 
184 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
185 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
186 }
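/*
 * Worked example (illustrative numbers only): with 100 free sections,
 * 10 dirty node sections, 5 dirty dentry sections, 1 dirty imeta section,
 * min_ssr_sections = 32 and reserved_sections() = 50, the check is
 * 100 <= 10 + 2 * 5 + 1 + 32 + 50 = 103, so SSR is enabled.
 */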
187 
188 void f2fs_abort_atomic_write(struct inode *inode, bool clean)
189 {
190 	struct f2fs_inode_info *fi = F2FS_I(inode);
191 
192 	if (!f2fs_is_atomic_file(inode))
193 		return;
194 
195 	clear_inode_flag(fi->cow_inode, FI_COW_FILE);
196 	iput(fi->cow_inode);
197 	fi->cow_inode = NULL;
198 	release_atomic_write_cnt(inode);
199 	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
200 	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
201 	clear_inode_flag(inode, FI_ATOMIC_FILE);
202 	stat_dec_atomic_inode(inode);
203 
204 	if (clean) {
205 		truncate_inode_pages_final(inode->i_mapping);
206 		f2fs_i_size_write(inode, fi->original_i_size);
207 	}
208 }
209 
210 static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
211 			block_t new_addr, block_t *old_addr, bool recover)
212 {
213 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
214 	struct dnode_of_data dn;
215 	struct node_info ni;
216 	int err;
217 
218 retry:
219 	set_new_dnode(&dn, inode, NULL, NULL, 0);
220 	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE_RA);
221 	if (err) {
222 		if (err == -ENOMEM) {
223 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
224 			goto retry;
225 		}
226 		return err;
227 	}
228 
229 	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
230 	if (err) {
231 		f2fs_put_dnode(&dn);
232 		return err;
233 	}
234 
235 	if (recover) {
236 		/* dn.data_blkaddr is always valid */
237 		if (!__is_valid_data_blkaddr(new_addr)) {
238 			if (new_addr == NULL_ADDR)
239 				dec_valid_block_count(sbi, inode, 1);
240 			f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
241 			f2fs_update_data_blkaddr(&dn, new_addr);
242 		} else {
243 			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
244 				new_addr, ni.version, true, true);
245 		}
246 	} else {
247 		blkcnt_t count = 1;
248 
249 		*old_addr = dn.data_blkaddr;
250 		f2fs_truncate_data_blocks_range(&dn, 1);
251 		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
252 		inc_valid_block_count(sbi, inode, &count);
253 		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
254 					ni.version, true, false);
255 	}
256 
257 	f2fs_put_dnode(&dn);
258 	return 0;
259 }
260 
261 static void __complete_revoke_list(struct inode *inode, struct list_head *head,
262 					bool revoke)
263 {
264 	struct revoke_entry *cur, *tmp;
265 	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);
266 
267 	list_for_each_entry_safe(cur, tmp, head, list) {
268 		if (revoke)
269 			__replace_atomic_write_block(inode, cur->index,
270 						cur->old_addr, NULL, true);
271 
272 		list_del(&cur->list);
273 		kmem_cache_free(revoke_entry_slab, cur);
274 	}
275 
276 	if (!revoke && truncate)
277 		f2fs_do_truncate_blocks(inode, 0, false);
278 }
279 
280 static int __f2fs_commit_atomic_write(struct inode *inode)
281 {
282 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
283 	struct f2fs_inode_info *fi = F2FS_I(inode);
284 	struct inode *cow_inode = fi->cow_inode;
285 	struct revoke_entry *new;
286 	struct list_head revoke_list;
287 	block_t blkaddr;
288 	struct dnode_of_data dn;
289 	pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
290 	pgoff_t off = 0, blen, index;
291 	int ret = 0, i;
292 
293 	INIT_LIST_HEAD(&revoke_list);
294 
295 	while (len) {
296 		blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);
297 
298 		set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
299 		ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
300 		if (ret && ret != -ENOENT) {
301 			goto out;
302 		} else if (ret == -ENOENT) {
303 			ret = 0;
304 			if (dn.max_level == 0)
305 				goto out;
306 			goto next;
307 		}
308 
309 		blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
310 				len);
311 		index = off;
312 		for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
313 			blkaddr = f2fs_data_blkaddr(&dn);
314 
315 			if (!__is_valid_data_blkaddr(blkaddr)) {
316 				continue;
317 			} else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
318 					DATA_GENERIC_ENHANCE)) {
319 				f2fs_put_dnode(&dn);
320 				ret = -EFSCORRUPTED;
321 				f2fs_handle_error(sbi,
322 						ERROR_INVALID_BLKADDR);
323 				goto out;
324 			}
325 
326 			new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
327 							true, NULL);
328 
329 			ret = __replace_atomic_write_block(inode, index, blkaddr,
330 							&new->old_addr, false);
331 			if (ret) {
332 				f2fs_put_dnode(&dn);
333 				kmem_cache_free(revoke_entry_slab, new);
334 				goto out;
335 			}
336 
337 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
338 			new->index = index;
339 			list_add_tail(&new->list, &revoke_list);
340 		}
341 		f2fs_put_dnode(&dn);
342 next:
343 		off += blen;
344 		len -= blen;
345 	}
346 
347 out:
348 	if (ret) {
349 		sbi->revoked_atomic_block += fi->atomic_write_cnt;
350 	} else {
351 		sbi->committed_atomic_block += fi->atomic_write_cnt;
352 		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
353 	}
354 
355 	__complete_revoke_list(inode, &revoke_list, ret ? true : false);
356 
357 	return ret;
358 }
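/*
 * In short: each valid block of the COW inode is moved into the original
 * inode via __replace_atomic_write_block(); the displaced address is
 * remembered in revoke_list so that, on failure, __complete_revoke_list()
 * can restore every block that was already moved (revoke == true), giving
 * the commit all-or-nothing semantics.
 */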
359 
360 int f2fs_commit_atomic_write(struct inode *inode)
361 {
362 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
363 	struct f2fs_inode_info *fi = F2FS_I(inode);
364 	int err;
365 
366 	err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
367 	if (err)
368 		return err;
369 
370 	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
371 	f2fs_lock_op(sbi);
372 
373 	err = __f2fs_commit_atomic_write(inode);
374 
375 	f2fs_unlock_op(sbi);
376 	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
377 
378 	return err;
379 }
380 
381 /*
382  * This function balances dirty node and dentry pages.
383  * In addition, it controls garbage collection.
384  */
385 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
386 {
387 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
388 		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
389 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);
390 	}
391 
392 	/* balance_fs_bg() may have been left pending */
393 	if (need && excess_cached_nats(sbi))
394 		f2fs_balance_fs_bg(sbi, false);
395 
396 	if (!f2fs_is_checkpoint_ready(sbi))
397 		return;
398 
399 	/*
400 	 * If there are too many dirty dir/node pages without enough free
401 	 * segments, we must run GC, which may end with a checkpoint.
402 	 */
403 	if (has_not_enough_free_secs(sbi, 0, 0)) {
404 		if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
405 					sbi->gc_thread->f2fs_gc_task) {
406 			DEFINE_WAIT(wait);
407 
408 			prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
409 						TASK_UNINTERRUPTIBLE);
410 			wake_up(&sbi->gc_thread->gc_wait_queue_head);
411 			io_schedule();
412 			finish_wait(&sbi->gc_thread->fggc_wq, &wait);
413 		} else {
414 			struct f2fs_gc_control gc_control = {
415 				.victim_segno = NULL_SEGNO,
416 				.init_gc_type = BG_GC,
417 				.no_bg_gc = true,
418 				.should_migrate_blocks = false,
419 				.err_gc_skipped = false,
420 				.nr_free_secs = 1 };
421 			f2fs_down_write(&sbi->gc_lock);
422 			f2fs_gc(sbi, &gc_control);
423 		}
424 	}
425 }
426 
427 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
428 {
429 	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
430 	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
431 	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
432 	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
433 	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
434 	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
435 	unsigned int threshold = sbi->blocks_per_seg * factor *
436 					DEFAULT_DIRTY_THRESHOLD;
437 	unsigned int global_threshold = threshold * 3 / 2;
438 
439 	if (dents >= threshold || qdata >= threshold ||
440 		nodes >= threshold || meta >= threshold ||
441 		imeta >= threshold)
442 		return true;
443 	return dents + qdata + nodes + meta + imeta > global_threshold;
444 }
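/*
 * For example, assuming blocks_per_seg = 512 and DEFAULT_DIRTY_THRESHOLD
 * (defined in segment.h) = 4: with cp_rwsem unlocked, factor = 2, so any
 * single dirty type above 512 * 2 * 4 = 4096 pages, or all types together
 * above 6144 pages, makes f2fs_balance_fs_bg() fall through to do_sync.
 */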
445 
446 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
447 {
448 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
449 		return;
450 
451 	/* try to shrink the read extent cache when there is not enough memory */
452 	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
453 		f2fs_shrink_read_extent_tree(sbi,
454 				READ_EXTENT_CACHE_SHRINK_NUMBER);
455 
456 	/* try to shrink the age extent cache when there is not enough memory */
457 	if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
458 		f2fs_shrink_age_extent_tree(sbi,
459 				AGE_EXTENT_CACHE_SHRINK_NUMBER);
460 
461 	/* check the # of cached NAT entries */
462 	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
463 		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
464 
465 	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
466 		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
467 	else
468 		f2fs_build_free_nids(sbi, false, false);
469 
470 	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
471 		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
472 		goto do_sync;
473 
474 	/* there is in-flight background IO, or a foreground operation ran recently */
475 	if (is_inflight_io(sbi, REQ_TIME) ||
476 		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
477 		return;
478 
479 	/* the periodic checkpoint timeout threshold has been exceeded */
480 	if (f2fs_time_over(sbi, CP_TIME))
481 		goto do_sync;
482 
483 	/* checkpoint is the only way to shrink partial cached entries */
484 	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
485 		f2fs_available_free_memory(sbi, INO_ENTRIES))
486 		return;
487 
488 do_sync:
489 	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
490 		struct blk_plug plug;
491 
492 		mutex_lock(&sbi->flush_lock);
493 
494 		blk_start_plug(&plug);
495 		f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
496 		blk_finish_plug(&plug);
497 
498 		mutex_unlock(&sbi->flush_lock);
499 	}
500 	f2fs_sync_fs(sbi->sb, 1);
501 	stat_inc_bg_cp_count(sbi->stat_info);
502 }
503 
504 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
505 				struct block_device *bdev)
506 {
507 	int ret = blkdev_issue_flush(bdev);
508 
509 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
510 				test_opt(sbi, FLUSH_MERGE), ret);
511 	return ret;
512 }
513 
514 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
515 {
516 	int ret = 0;
517 	int i;
518 
519 	if (!f2fs_is_multi_device(sbi))
520 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
521 
522 	for (i = 0; i < sbi->s_ndevs; i++) {
523 		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
524 			continue;
525 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
526 		if (ret)
527 			break;
528 	}
529 	return ret;
530 }
531 
532 static int issue_flush_thread(void *data)
533 {
534 	struct f2fs_sb_info *sbi = data;
535 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
536 	wait_queue_head_t *q = &fcc->flush_wait_queue;
537 repeat:
538 	if (kthread_should_stop())
539 		return 0;
540 
541 	if (!llist_empty(&fcc->issue_list)) {
542 		struct flush_cmd *cmd, *next;
543 		int ret;
544 
545 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
546 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
547 
548 		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
549 
550 		ret = submit_flush_wait(sbi, cmd->ino);
551 		atomic_inc(&fcc->issued_flush);
552 
553 		llist_for_each_entry_safe(cmd, next,
554 					  fcc->dispatch_list, llnode) {
555 			cmd->ret = ret;
556 			complete(&cmd->wait);
557 		}
558 		fcc->dispatch_list = NULL;
559 	}
560 
561 	wait_event_interruptible(*q,
562 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
563 	goto repeat;
564 }
565 
566 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
567 {
568 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
569 	struct flush_cmd cmd;
570 	int ret;
571 
572 	if (test_opt(sbi, NOBARRIER))
573 		return 0;
574 
575 	if (!test_opt(sbi, FLUSH_MERGE)) {
576 		atomic_inc(&fcc->queued_flush);
577 		ret = submit_flush_wait(sbi, ino);
578 		atomic_dec(&fcc->queued_flush);
579 		atomic_inc(&fcc->issued_flush);
580 		return ret;
581 	}
582 
583 	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
584 	    f2fs_is_multi_device(sbi)) {
585 		ret = submit_flush_wait(sbi, ino);
586 		atomic_dec(&fcc->queued_flush);
587 
588 		atomic_inc(&fcc->issued_flush);
589 		return ret;
590 	}
591 
592 	cmd.ino = ino;
593 	init_completion(&cmd.wait);
594 
595 	llist_add(&cmd.llnode, &fcc->issue_list);
596 
597 	/*
598 	 * Update issue_list before waking up the issue_flush thread; this
599 	 * smp_mb() pairs with another barrier in ___wait_event(). See the
600 	 * comments of waitqueue_active() for more details.
601 	 */
602 	smp_mb();
603 
604 	if (waitqueue_active(&fcc->flush_wait_queue))
605 		wake_up(&fcc->flush_wait_queue);
606 
607 	if (fcc->f2fs_issue_flush) {
608 		wait_for_completion(&cmd.wait);
609 		atomic_dec(&fcc->queued_flush);
610 	} else {
611 		struct llist_node *list;
612 
613 		list = llist_del_all(&fcc->issue_list);
614 		if (!list) {
615 			wait_for_completion(&cmd.wait);
616 			atomic_dec(&fcc->queued_flush);
617 		} else {
618 			struct flush_cmd *tmp, *next;
619 
620 			ret = submit_flush_wait(sbi, ino);
621 
622 			llist_for_each_entry_safe(tmp, next, list, llnode) {
623 				if (tmp == &cmd) {
624 					cmd.ret = ret;
625 					atomic_dec(&fcc->queued_flush);
626 					continue;
627 				}
628 				tmp->ret = ret;
629 				complete(&tmp->wait);
630 			}
631 		}
632 	}
633 
634 	return cmd.ret;
635 }
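/*
 * The else-branch above handles the race where the flush thread has gone
 * away (fcc->f2fs_issue_flush == NULL) after we queued cmd: the caller
 * drains issue_list itself with llist_del_all(), issues one flush on
 * behalf of every queued waiter, and completes the others, so nobody
 * blocks on a completion that no thread will ever signal.
 */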
636 
637 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
638 {
639 	dev_t dev = sbi->sb->s_bdev->bd_dev;
640 	struct flush_cmd_control *fcc;
641 
642 	if (SM_I(sbi)->fcc_info) {
643 		fcc = SM_I(sbi)->fcc_info;
644 		if (fcc->f2fs_issue_flush)
645 			return 0;
646 		goto init_thread;
647 	}
648 
649 	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
650 	if (!fcc)
651 		return -ENOMEM;
652 	atomic_set(&fcc->issued_flush, 0);
653 	atomic_set(&fcc->queued_flush, 0);
654 	init_waitqueue_head(&fcc->flush_wait_queue);
655 	init_llist_head(&fcc->issue_list);
656 	SM_I(sbi)->fcc_info = fcc;
657 	if (!test_opt(sbi, FLUSH_MERGE))
658 		return 0;
659 
660 init_thread:
661 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
662 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
663 	if (IS_ERR(fcc->f2fs_issue_flush)) {
664 		int err = PTR_ERR(fcc->f2fs_issue_flush);
665 
666 		fcc->f2fs_issue_flush = NULL;
667 		return err;
668 	}
669 
670 	return 0;
671 }
672 
673 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
674 {
675 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
676 
677 	if (fcc && fcc->f2fs_issue_flush) {
678 		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
679 
680 		fcc->f2fs_issue_flush = NULL;
681 		kthread_stop(flush_thread);
682 	}
683 	if (free) {
684 		kfree(fcc);
685 		SM_I(sbi)->fcc_info = NULL;
686 	}
687 }
688 
689 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
690 {
691 	int ret = 0, i;
692 
693 	if (!f2fs_is_multi_device(sbi))
694 		return 0;
695 
696 	if (test_opt(sbi, NOBARRIER))
697 		return 0;
698 
699 	for (i = 1; i < sbi->s_ndevs; i++) {
700 		int count = DEFAULT_RETRY_IO_COUNT;
701 
702 		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
703 			continue;
704 
705 		do {
706 			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
707 			if (ret)
708 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
709 		} while (ret && --count);
710 
711 		if (ret) {
712 			f2fs_stop_checkpoint(sbi, false,
713 					STOP_CP_REASON_FLUSH_FAIL);
714 			break;
715 		}
716 
717 		spin_lock(&sbi->dev_lock);
718 		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
719 		spin_unlock(&sbi->dev_lock);
720 	}
721 
722 	return ret;
723 }
724 
725 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
726 		enum dirty_type dirty_type)
727 {
728 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
729 
730 	/* a current segment need not be added */
731 	if (IS_CURSEG(sbi, segno))
732 		return;
733 
734 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
735 		dirty_i->nr_dirty[dirty_type]++;
736 
737 	if (dirty_type == DIRTY) {
738 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
739 		enum dirty_type t = sentry->type;
740 
741 		if (unlikely(t >= DIRTY)) {
742 			f2fs_bug_on(sbi, 1);
743 			return;
744 		}
745 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
746 			dirty_i->nr_dirty[t]++;
747 
748 		if (__is_large_section(sbi)) {
749 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
750 			block_t valid_blocks =
751 				get_valid_blocks(sbi, segno, true);
752 
753 			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
754 					valid_blocks == CAP_BLKS_PER_SEC(sbi)));
755 
756 			if (!IS_CURSEC(sbi, secno))
757 				set_bit(secno, dirty_i->dirty_secmap);
758 		}
759 	}
760 }
761 
762 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
763 		enum dirty_type dirty_type)
764 {
765 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
766 	block_t valid_blocks;
767 
768 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
769 		dirty_i->nr_dirty[dirty_type]--;
770 
771 	if (dirty_type == DIRTY) {
772 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
773 		enum dirty_type t = sentry->type;
774 
775 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
776 			dirty_i->nr_dirty[t]--;
777 
778 		valid_blocks = get_valid_blocks(sbi, segno, true);
779 		if (valid_blocks == 0) {
780 			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
781 						dirty_i->victim_secmap);
782 #ifdef CONFIG_F2FS_CHECK_FS
783 			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
784 #endif
785 		}
786 		if (__is_large_section(sbi)) {
787 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
788 
789 			if (!valid_blocks ||
790 					valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
791 				clear_bit(secno, dirty_i->dirty_secmap);
792 				return;
793 			}
794 
795 			if (!IS_CURSEC(sbi, secno))
796 				set_bit(secno, dirty_i->dirty_secmap);
797 		}
798 	}
799 }
800 
801 /*
802  * Errors such as -ENOMEM should not occur here, since adding a dirty
803  * entry into the seglist is not a critical operation.
804  * If a given segment is one of the current working segments, it won't be added.
805  */
806 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
807 {
808 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
809 	unsigned short valid_blocks, ckpt_valid_blocks;
810 	unsigned int usable_blocks;
811 
812 	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
813 		return;
814 
815 	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
816 	mutex_lock(&dirty_i->seglist_lock);
817 
818 	valid_blocks = get_valid_blocks(sbi, segno, false);
819 	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
820 
821 	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
822 		ckpt_valid_blocks == usable_blocks)) {
823 		__locate_dirty_segment(sbi, segno, PRE);
824 		__remove_dirty_segment(sbi, segno, DIRTY);
825 	} else if (valid_blocks < usable_blocks) {
826 		__locate_dirty_segment(sbi, segno, DIRTY);
827 	} else {
828 		/* Recovery routine with SSR needs this */
829 		__remove_dirty_segment(sbi, segno, DIRTY);
830 	}
831 
832 	mutex_unlock(&dirty_i->seglist_lock);
833 }
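/*
 * Summary of the transitions above: a segment with no valid blocks goes
 * to PRE (free after the next checkpoint), a partially valid one goes to
 * DIRTY, and a fully valid one is removed from DIRTY, which is what the
 * SSR-based recovery path relies on.
 */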
834 
835 /* This moves currently empty dirty segments to prefree under seglist_lock. */
836 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
837 {
838 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
839 	unsigned int segno;
840 
841 	mutex_lock(&dirty_i->seglist_lock);
842 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
843 		if (get_valid_blocks(sbi, segno, false))
844 			continue;
845 		if (IS_CURSEG(sbi, segno))
846 			continue;
847 		__locate_dirty_segment(sbi, segno, PRE);
848 		__remove_dirty_segment(sbi, segno, DIRTY);
849 	}
850 	mutex_unlock(&dirty_i->seglist_lock);
851 }
852 
853 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
854 {
855 	int ovp_hole_segs =
856 		(overprovision_segments(sbi) - reserved_segments(sbi));
857 	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
858 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
859 	block_t holes[2] = {0, 0};	/* DATA and NODE */
860 	block_t unusable;
861 	struct seg_entry *se;
862 	unsigned int segno;
863 
864 	mutex_lock(&dirty_i->seglist_lock);
865 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
866 		se = get_seg_entry(sbi, segno);
867 		if (IS_NODESEG(se->type))
868 			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
869 							se->valid_blocks;
870 		else
871 			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
872 							se->valid_blocks;
873 	}
874 	mutex_unlock(&dirty_i->seglist_lock);
875 
876 	unusable = max(holes[DATA], holes[NODE]);
877 	if (unusable > ovp_holes)
878 		return unusable - ovp_holes;
879 	return 0;
880 }
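/*
 * Illustrative numbers: with 10 overprovision segments beyond the
 * reserved ones and 512 blocks per segment, ovp_holes = 5120 blocks; if
 * the dirty DATA segments contain 7000 holes and the NODE segments 3000,
 * unusable = max(7000, 3000) - 5120 = 1880 blocks.
 */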
881 
882 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
883 {
884 	int ovp_hole_segs =
885 		(overprovision_segments(sbi) - reserved_segments(sbi));
886 	if (unusable > F2FS_OPTION(sbi).unusable_cap)
887 		return -EAGAIN;
888 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
889 		dirty_segments(sbi) > ovp_hole_segs)
890 		return -EAGAIN;
891 	return 0;
892 }
893 
894 /* This is only used by SBI_CP_DISABLED */
895 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
896 {
897 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
898 	unsigned int segno = 0;
899 
900 	mutex_lock(&dirty_i->seglist_lock);
901 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
902 		if (get_valid_blocks(sbi, segno, false))
903 			continue;
904 		if (get_ckpt_valid_blocks(sbi, segno, false))
905 			continue;
906 		mutex_unlock(&dirty_i->seglist_lock);
907 		return segno;
908 	}
909 	mutex_unlock(&dirty_i->seglist_lock);
910 	return NULL_SEGNO;
911 }
912 
913 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
914 		struct block_device *bdev, block_t lstart,
915 		block_t start, block_t len)
916 {
917 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
918 	struct list_head *pend_list;
919 	struct discard_cmd *dc;
920 
921 	f2fs_bug_on(sbi, !len);
922 
923 	pend_list = &dcc->pend_list[plist_idx(len)];
924 
925 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
926 	INIT_LIST_HEAD(&dc->list);
927 	dc->bdev = bdev;
928 	dc->lstart = lstart;
929 	dc->start = start;
930 	dc->len = len;
931 	dc->ref = 0;
932 	dc->state = D_PREP;
933 	dc->queued = 0;
934 	dc->error = 0;
935 	init_completion(&dc->wait);
936 	list_add_tail(&dc->list, pend_list);
937 	spin_lock_init(&dc->lock);
938 	dc->bio_ref = 0;
939 	atomic_inc(&dcc->discard_cmd_cnt);
940 	dcc->undiscard_blks += len;
941 
942 	return dc;
943 }
944 
945 static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
946 				struct block_device *bdev, block_t lstart,
947 				block_t start, block_t len,
948 				struct rb_node *parent, struct rb_node **p,
949 				bool leftmost)
950 {
951 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
952 	struct discard_cmd *dc;
953 
954 	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
955 
956 	rb_link_node(&dc->rb_node, parent, p);
957 	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
958 
959 	return dc;
960 }
961 
962 static void __detach_discard_cmd(struct discard_cmd_control *dcc,
963 							struct discard_cmd *dc)
964 {
965 	if (dc->state == D_DONE)
966 		atomic_sub(dc->queued, &dcc->queued_discard);
967 
968 	list_del(&dc->list);
969 	rb_erase_cached(&dc->rb_node, &dcc->root);
970 	dcc->undiscard_blks -= dc->len;
971 
972 	kmem_cache_free(discard_cmd_slab, dc);
973 
974 	atomic_dec(&dcc->discard_cmd_cnt);
975 }
976 
977 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
978 							struct discard_cmd *dc)
979 {
980 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
981 	unsigned long flags;
982 
983 	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
984 
985 	spin_lock_irqsave(&dc->lock, flags);
986 	if (dc->bio_ref) {
987 		spin_unlock_irqrestore(&dc->lock, flags);
988 		return;
989 	}
990 	spin_unlock_irqrestore(&dc->lock, flags);
991 
992 	f2fs_bug_on(sbi, dc->ref);
993 
994 	if (dc->error == -EOPNOTSUPP)
995 		dc->error = 0;
996 
997 	if (dc->error)
998 		printk_ratelimited(
999 			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
1000 			KERN_INFO, sbi->sb->s_id,
1001 			dc->lstart, dc->start, dc->len, dc->error);
1002 	__detach_discard_cmd(dcc, dc);
1003 }
1004 
1005 static void f2fs_submit_discard_endio(struct bio *bio)
1006 {
1007 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
1008 	unsigned long flags;
1009 
1010 	spin_lock_irqsave(&dc->lock, flags);
1011 	if (!dc->error)
1012 		dc->error = blk_status_to_errno(bio->bi_status);
1013 	dc->bio_ref--;
1014 	if (!dc->bio_ref && dc->state == D_SUBMIT) {
1015 		dc->state = D_DONE;
1016 		complete_all(&dc->wait);
1017 	}
1018 	spin_unlock_irqrestore(&dc->lock, flags);
1019 	bio_put(bio);
1020 }
1021 
1022 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1023 				block_t start, block_t end)
1024 {
1025 #ifdef CONFIG_F2FS_CHECK_FS
1026 	struct seg_entry *sentry;
1027 	unsigned int segno;
1028 	block_t blk = start;
1029 	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
1030 	unsigned long *map;
1031 
1032 	while (blk < end) {
1033 		segno = GET_SEGNO(sbi, blk);
1034 		sentry = get_seg_entry(sbi, segno);
1035 		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1036 
1037 		if (end < START_BLOCK(sbi, segno + 1))
1038 			size = GET_BLKOFF_FROM_SEG0(sbi, end);
1039 		else
1040 			size = max_blocks;
1041 		map = (unsigned long *)(sentry->cur_valid_map);
1042 		offset = __find_rev_next_bit(map, size, offset);
1043 		f2fs_bug_on(sbi, offset != size);
1044 		blk = START_BLOCK(sbi, segno + 1);
1045 	}
1046 #endif
1047 }
1048 
1049 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1050 				struct discard_policy *dpolicy,
1051 				int discard_type, unsigned int granularity)
1052 {
1053 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1054 
1055 	/* common policy */
1056 	dpolicy->type = discard_type;
1057 	dpolicy->sync = true;
1058 	dpolicy->ordered = false;
1059 	dpolicy->granularity = granularity;
1060 
1061 	dpolicy->max_requests = dcc->max_discard_request;
1062 	dpolicy->io_aware_gran = MAX_PLIST_NUM;
1063 	dpolicy->timeout = false;
1064 
1065 	if (discard_type == DPOLICY_BG) {
1066 		dpolicy->min_interval = dcc->min_discard_issue_time;
1067 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1068 		dpolicy->max_interval = dcc->max_discard_issue_time;
1069 		dpolicy->io_aware = true;
1070 		dpolicy->sync = false;
1071 		dpolicy->ordered = true;
1072 		if (utilization(sbi) > dcc->discard_urgent_util) {
1073 			dpolicy->granularity = MIN_DISCARD_GRANULARITY;
1074 			if (atomic_read(&dcc->discard_cmd_cnt))
1075 				dpolicy->max_interval =
1076 					dcc->min_discard_issue_time;
1077 		}
1078 	} else if (discard_type == DPOLICY_FORCE) {
1079 		dpolicy->min_interval = dcc->min_discard_issue_time;
1080 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1081 		dpolicy->max_interval = dcc->max_discard_issue_time;
1082 		dpolicy->io_aware = false;
1083 	} else if (discard_type == DPOLICY_FSTRIM) {
1084 		dpolicy->io_aware = false;
1085 	} else if (discard_type == DPOLICY_UMOUNT) {
1086 		dpolicy->io_aware = false;
1087 		/* we need to issue all pending discards to keep CP_TRIMMED_FLAG */
1088 		dpolicy->granularity = MIN_DISCARD_GRANULARITY;
1089 		dpolicy->timeout = true;
1090 	}
1091 }
1092 
1093 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1094 				struct block_device *bdev, block_t lstart,
1095 				block_t start, block_t len);
1096 /* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
1097 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1098 						struct discard_policy *dpolicy,
1099 						struct discard_cmd *dc,
1100 						unsigned int *issued)
1101 {
1102 	struct block_device *bdev = dc->bdev;
1103 	unsigned int max_discard_blocks =
1104 			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1105 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1106 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1107 					&(dcc->fstrim_list) : &(dcc->wait_list);
1108 	blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
1109 	block_t lstart, start, len, total_len;
1110 	int err = 0;
1111 
1112 	if (dc->state != D_PREP)
1113 		return 0;
1114 
1115 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1116 		return 0;
1117 
1118 	trace_f2fs_issue_discard(bdev, dc->start, dc->len);
1119 
1120 	lstart = dc->lstart;
1121 	start = dc->start;
1122 	len = dc->len;
1123 	total_len = len;
1124 
1125 	dc->len = 0;
1126 
1127 	while (total_len && *issued < dpolicy->max_requests && !err) {
1128 		struct bio *bio = NULL;
1129 		unsigned long flags;
1130 		bool last = true;
1131 
1132 		if (len > max_discard_blocks) {
1133 			len = max_discard_blocks;
1134 			last = false;
1135 		}
1136 
1137 		(*issued)++;
1138 		if (*issued == dpolicy->max_requests)
1139 			last = true;
1140 
1141 		dc->len += len;
1142 
1143 		if (time_to_inject(sbi, FAULT_DISCARD)) {
1144 			f2fs_show_injection_info(sbi, FAULT_DISCARD);
1145 			err = -EIO;
1146 		} else {
1147 			err = __blkdev_issue_discard(bdev,
1148 					SECTOR_FROM_BLOCK(start),
1149 					SECTOR_FROM_BLOCK(len),
1150 					GFP_NOFS, &bio);
1151 		}
1152 		if (err) {
1153 			spin_lock_irqsave(&dc->lock, flags);
1154 			if (dc->state == D_PARTIAL)
1155 				dc->state = D_SUBMIT;
1156 			spin_unlock_irqrestore(&dc->lock, flags);
1157 
1158 			break;
1159 		}
1160 
1161 		f2fs_bug_on(sbi, !bio);
1162 
1163 		/*
1164 		 * the state must be set before submission, so the endio
1165 		 * callback cannot mark the command D_DONE right away
1166 		 */
1167 		spin_lock_irqsave(&dc->lock, flags);
1168 		if (last)
1169 			dc->state = D_SUBMIT;
1170 		else
1171 			dc->state = D_PARTIAL;
1172 		dc->bio_ref++;
1173 		spin_unlock_irqrestore(&dc->lock, flags);
1174 
1175 		atomic_inc(&dcc->queued_discard);
1176 		dc->queued++;
1177 		list_move_tail(&dc->list, wait_list);
1178 
1179 		/* sanity check on discard range */
1180 		__check_sit_bitmap(sbi, lstart, lstart + len);
1181 
1182 		bio->bi_private = dc;
1183 		bio->bi_end_io = f2fs_submit_discard_endio;
1184 		bio->bi_opf |= flag;
1185 		submit_bio(bio);
1186 
1187 		atomic_inc(&dcc->issued_discard);
1188 
1189 		f2fs_update_iostat(sbi, NULL, FS_DISCARD, len * F2FS_BLKSIZE);
1190 
1191 		lstart += len;
1192 		start += len;
1193 		total_len -= len;
1194 		len = total_len;
1195 	}
1196 
1197 	if (!err && len) {
1198 		dcc->undiscard_blks -= len;
1199 		__update_discard_tree_range(sbi, bdev, lstart, start, len);
1200 	}
1201 	return err;
1202 }
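/*
 * Splitting example (illustrative): if dc covers 24576 blocks but the
 * device caps one discard at 8192 blocks, the loop issues three bios; the
 * first two leave dc in D_PARTIAL and only the last one (or hitting
 * dpolicy->max_requests) sets D_SUBMIT, so the endio moves dc to D_DONE
 * only after the final bio completes.
 */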
1203 
1204 static void __insert_discard_tree(struct f2fs_sb_info *sbi,
1205 				struct block_device *bdev, block_t lstart,
1206 				block_t start, block_t len,
1207 				struct rb_node **insert_p,
1208 				struct rb_node *insert_parent)
1209 {
1210 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1211 	struct rb_node **p;
1212 	struct rb_node *parent = NULL;
1213 	bool leftmost = true;
1214 
1215 	if (insert_p && insert_parent) {
1216 		parent = insert_parent;
1217 		p = insert_p;
1218 		goto do_insert;
1219 	}
1220 
1221 	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
1222 							lstart, &leftmost);
1223 do_insert:
1224 	__attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
1225 								p, leftmost);
1226 }
1227 
1228 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1229 						struct discard_cmd *dc)
1230 {
1231 	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
1232 }
1233 
1234 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1235 				struct discard_cmd *dc, block_t blkaddr)
1236 {
1237 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1238 	struct discard_info di = dc->di;
1239 	bool modified = false;
1240 
1241 	if (dc->state == D_DONE || dc->len == 1) {
1242 		__remove_discard_cmd(sbi, dc);
1243 		return;
1244 	}
1245 
1246 	dcc->undiscard_blks -= di.len;
1247 
1248 	if (blkaddr > di.lstart) {
1249 		dc->len = blkaddr - dc->lstart;
1250 		dcc->undiscard_blks += dc->len;
1251 		__relocate_discard_cmd(dcc, dc);
1252 		modified = true;
1253 	}
1254 
1255 	if (blkaddr < di.lstart + di.len - 1) {
1256 		if (modified) {
1257 			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
1258 					di.start + blkaddr + 1 - di.lstart,
1259 					di.lstart + di.len - 1 - blkaddr,
1260 					NULL, NULL);
1261 		} else {
1262 			dc->lstart++;
1263 			dc->len--;
1264 			dc->start++;
1265 			dcc->undiscard_blks += dc->len;
1266 			__relocate_discard_cmd(dcc, dc);
1267 		}
1268 	}
1269 }
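/*
 * Punching example (illustrative): for a pending command covering
 * lstart 100, len 10, punching blkaddr 104 trims the command to
 * [100, 104) and inserts a new one for [105, 110), so only the punched
 * block is dropped from the discard range.
 */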
1270 
1271 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1272 				struct block_device *bdev, block_t lstart,
1273 				block_t start, block_t len)
1274 {
1275 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1276 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1277 	struct discard_cmd *dc;
1278 	struct discard_info di = {0};
1279 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1280 	unsigned int max_discard_blocks =
1281 			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1282 	block_t end = lstart + len;
1283 
1284 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1285 					NULL, lstart,
1286 					(struct rb_entry **)&prev_dc,
1287 					(struct rb_entry **)&next_dc,
1288 					&insert_p, &insert_parent, true, NULL);
1289 	if (dc)
1290 		prev_dc = dc;
1291 
1292 	if (!prev_dc) {
1293 		di.lstart = lstart;
1294 		di.len = next_dc ? next_dc->lstart - lstart : len;
1295 		di.len = min(di.len, len);
1296 		di.start = start;
1297 	}
1298 
1299 	while (1) {
1300 		struct rb_node *node;
1301 		bool merged = false;
1302 		struct discard_cmd *tdc = NULL;
1303 
1304 		if (prev_dc) {
1305 			di.lstart = prev_dc->lstart + prev_dc->len;
1306 			if (di.lstart < lstart)
1307 				di.lstart = lstart;
1308 			if (di.lstart >= end)
1309 				break;
1310 
1311 			if (!next_dc || next_dc->lstart > end)
1312 				di.len = end - di.lstart;
1313 			else
1314 				di.len = next_dc->lstart - di.lstart;
1315 			di.start = start + di.lstart - lstart;
1316 		}
1317 
1318 		if (!di.len)
1319 			goto next;
1320 
1321 		if (prev_dc && prev_dc->state == D_PREP &&
1322 			prev_dc->bdev == bdev &&
1323 			__is_discard_back_mergeable(&di, &prev_dc->di,
1324 							max_discard_blocks)) {
1325 			prev_dc->di.len += di.len;
1326 			dcc->undiscard_blks += di.len;
1327 			__relocate_discard_cmd(dcc, prev_dc);
1328 			di = prev_dc->di;
1329 			tdc = prev_dc;
1330 			merged = true;
1331 		}
1332 
1333 		if (next_dc && next_dc->state == D_PREP &&
1334 			next_dc->bdev == bdev &&
1335 			__is_discard_front_mergeable(&di, &next_dc->di,
1336 							max_discard_blocks)) {
1337 			next_dc->di.lstart = di.lstart;
1338 			next_dc->di.len += di.len;
1339 			next_dc->di.start = di.start;
1340 			dcc->undiscard_blks += di.len;
1341 			__relocate_discard_cmd(dcc, next_dc);
1342 			if (tdc)
1343 				__remove_discard_cmd(sbi, tdc);
1344 			merged = true;
1345 		}
1346 
1347 		if (!merged) {
1348 			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
1349 							di.len, NULL, NULL);
1350 		}
1351  next:
1352 		prev_dc = next_dc;
1353 		if (!prev_dc)
1354 			break;
1355 
1356 		node = rb_next(&prev_dc->rb_node);
1357 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1358 	}
1359 }
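/*
 * Merging example (illustrative): adding [200, 300) when a D_PREP command
 * already covers [100, 200) on the same bdev extends that command to
 * [100, 300) via __is_discard_back_mergeable(); if another D_PREP command
 * starts at 300, it is front-merged as well and the temporary back-merged
 * node is removed, leaving one command for the whole contiguous range.
 */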
1360 
1361 static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
1362 		struct block_device *bdev, block_t blkstart, block_t blklen)
1363 {
1364 	block_t lblkstart = blkstart;
1365 
1366 	if (!f2fs_bdev_support_discard(bdev))
1367 		return;
1368 
1369 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
1370 
1371 	if (f2fs_is_multi_device(sbi)) {
1372 		int devi = f2fs_target_device_index(sbi, blkstart);
1373 
1374 		blkstart -= FDEV(devi).start_blk;
1375 	}
1376 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1377 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1378 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1379 }
1380 
1381 static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1382 					struct discard_policy *dpolicy)
1383 {
1384 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1385 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1386 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1387 	struct discard_cmd *dc;
1388 	struct blk_plug plug;
1389 	unsigned int pos = dcc->next_pos;
1390 	unsigned int issued = 0;
1391 	bool io_interrupted = false;
1392 
1393 	mutex_lock(&dcc->cmd_lock);
1394 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1395 					NULL, pos,
1396 					(struct rb_entry **)&prev_dc,
1397 					(struct rb_entry **)&next_dc,
1398 					&insert_p, &insert_parent, true, NULL);
1399 	if (!dc)
1400 		dc = next_dc;
1401 
1402 	blk_start_plug(&plug);
1403 
1404 	while (dc) {
1405 		struct rb_node *node;
1406 		int err = 0;
1407 
1408 		if (dc->state != D_PREP)
1409 			goto next;
1410 
1411 		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1412 			io_interrupted = true;
1413 			break;
1414 		}
1415 
1416 		dcc->next_pos = dc->lstart + dc->len;
1417 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1418 
1419 		if (issued >= dpolicy->max_requests)
1420 			break;
1421 next:
1422 		node = rb_next(&dc->rb_node);
1423 		if (err)
1424 			__remove_discard_cmd(sbi, dc);
1425 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1426 	}
1427 
1428 	blk_finish_plug(&plug);
1429 
1430 	if (!dc)
1431 		dcc->next_pos = 0;
1432 
1433 	mutex_unlock(&dcc->cmd_lock);
1434 
1435 	if (!issued && io_interrupted)
1436 		issued = -1;
1437 
1438 	return issued;
1439 }
1440 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1441 					struct discard_policy *dpolicy);
1442 
1443 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1444 					struct discard_policy *dpolicy)
1445 {
1446 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1447 	struct list_head *pend_list;
1448 	struct discard_cmd *dc, *tmp;
1449 	struct blk_plug plug;
1450 	int i, issued;
1451 	bool io_interrupted = false;
1452 
1453 	if (dpolicy->timeout)
1454 		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1455 
1456 retry:
1457 	issued = 0;
1458 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1459 		if (dpolicy->timeout &&
1460 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1461 			break;
1462 
1463 		if (i + 1 < dpolicy->granularity)
1464 			break;
1465 
1466 		if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered)
1467 			return __issue_discard_cmd_orderly(sbi, dpolicy);
1468 
1469 		pend_list = &dcc->pend_list[i];
1470 
1471 		mutex_lock(&dcc->cmd_lock);
1472 		if (list_empty(pend_list))
1473 			goto next;
1474 		if (unlikely(dcc->rbtree_check))
1475 			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
1476 							&dcc->root, false));
1477 		blk_start_plug(&plug);
1478 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1479 			f2fs_bug_on(sbi, dc->state != D_PREP);
1480 
1481 			if (dpolicy->timeout &&
1482 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1483 				break;
1484 
1485 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1486 						!is_idle(sbi, DISCARD_TIME)) {
1487 				io_interrupted = true;
1488 				break;
1489 			}
1490 
1491 			__submit_discard_cmd(sbi, dpolicy, dc, &issued);
1492 
1493 			if (issued >= dpolicy->max_requests)
1494 				break;
1495 		}
1496 		blk_finish_plug(&plug);
1497 next:
1498 		mutex_unlock(&dcc->cmd_lock);
1499 
1500 		if (issued >= dpolicy->max_requests || io_interrupted)
1501 			break;
1502 	}
1503 
1504 	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1505 		__wait_all_discard_cmd(sbi, dpolicy);
1506 		goto retry;
1507 	}
1508 
1509 	if (!issued && io_interrupted)
1510 		issued = -1;
1511 
1512 	return issued;
1513 }
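/*
 * Iteration order note: pend_list[i] holds commands of length i + 1 (the
 * last list holds everything longer), so scanning from MAX_PLIST_NUM - 1
 * down to dpolicy->granularity - 1 issues large discards first and skips
 * any run shorter than the configured granularity.
 */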
1514 
1515 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1516 {
1517 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1518 	struct list_head *pend_list;
1519 	struct discard_cmd *dc, *tmp;
1520 	int i;
1521 	bool dropped = false;
1522 
1523 	mutex_lock(&dcc->cmd_lock);
1524 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1525 		pend_list = &dcc->pend_list[i];
1526 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1527 			f2fs_bug_on(sbi, dc->state != D_PREP);
1528 			__remove_discard_cmd(sbi, dc);
1529 			dropped = true;
1530 		}
1531 	}
1532 	mutex_unlock(&dcc->cmd_lock);
1533 
1534 	return dropped;
1535 }
1536 
1537 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1538 {
1539 	__drop_discard_cmd(sbi);
1540 }
1541 
1542 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1543 							struct discard_cmd *dc)
1544 {
1545 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1546 	unsigned int len = 0;
1547 
1548 	wait_for_completion_io(&dc->wait);
1549 	mutex_lock(&dcc->cmd_lock);
1550 	f2fs_bug_on(sbi, dc->state != D_DONE);
1551 	dc->ref--;
1552 	if (!dc->ref) {
1553 		if (!dc->error)
1554 			len = dc->len;
1555 		__remove_discard_cmd(sbi, dc);
1556 	}
1557 	mutex_unlock(&dcc->cmd_lock);
1558 
1559 	return len;
1560 }
1561 
1562 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1563 						struct discard_policy *dpolicy,
1564 						block_t start, block_t end)
1565 {
1566 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1567 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1568 					&(dcc->fstrim_list) : &(dcc->wait_list);
1569 	struct discard_cmd *dc = NULL, *iter, *tmp;
1570 	unsigned int trimmed = 0;
1571 
1572 next:
1573 	dc = NULL;
1574 
1575 	mutex_lock(&dcc->cmd_lock);
1576 	list_for_each_entry_safe(iter, tmp, wait_list, list) {
1577 		if (iter->lstart + iter->len <= start || end <= iter->lstart)
1578 			continue;
1579 		if (iter->len < dpolicy->granularity)
1580 			continue;
1581 		if (iter->state == D_DONE && !iter->ref) {
1582 			wait_for_completion_io(&iter->wait);
1583 			if (!iter->error)
1584 				trimmed += iter->len;
1585 			__remove_discard_cmd(sbi, iter);
1586 		} else {
1587 			iter->ref++;
1588 			dc = iter;
1589 			break;
1590 		}
1591 	}
1592 	mutex_unlock(&dcc->cmd_lock);
1593 
1594 	if (dc) {
1595 		trimmed += __wait_one_discard_bio(sbi, dc);
1596 		goto next;
1597 	}
1598 
1599 	return trimmed;
1600 }
1601 
1602 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1603 						struct discard_policy *dpolicy)
1604 {
1605 	struct discard_policy dp;
1606 	unsigned int discard_blks;
1607 
1608 	if (dpolicy)
1609 		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1610 
1611 	/* wait all */
1612 	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
1613 	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1614 	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
1615 	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1616 
1617 	return discard_blks;
1618 }
1619 
1620 /* This should be called under the global mutex &sit_i->sentry_lock */
1621 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1622 {
1623 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1624 	struct discard_cmd *dc;
1625 	bool need_wait = false;
1626 
1627 	mutex_lock(&dcc->cmd_lock);
1628 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
1629 							NULL, blkaddr);
1630 	if (dc) {
1631 		if (dc->state == D_PREP) {
1632 			__punch_discard_cmd(sbi, dc, blkaddr);
1633 		} else {
1634 			dc->ref++;
1635 			need_wait = true;
1636 		}
1637 	}
1638 	mutex_unlock(&dcc->cmd_lock);
1639 
1640 	if (need_wait)
1641 		__wait_one_discard_bio(sbi, dc);
1642 }
1643 
1644 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1645 {
1646 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1647 
1648 	if (dcc && dcc->f2fs_issue_discard) {
1649 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1650 
1651 		dcc->f2fs_issue_discard = NULL;
1652 		kthread_stop(discard_thread);
1653 	}
1654 }
1655 
1656 /* This is called from f2fs_put_super */
1657 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1658 {
1659 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1660 	struct discard_policy dpolicy;
1661 	bool dropped;
1662 
1663 	if (!atomic_read(&dcc->discard_cmd_cnt))
1664 		return false;
1665 
1666 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1667 					dcc->discard_granularity);
1668 	__issue_discard_cmd(sbi, &dpolicy);
1669 	dropped = __drop_discard_cmd(sbi);
1670 
1671 	/* just to make sure there are no pending discard commands */
1672 	__wait_all_discard_cmd(sbi, NULL);
1673 
1674 	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1675 	return dropped;
1676 }
1677 
1678 static int issue_discard_thread(void *data)
1679 {
1680 	struct f2fs_sb_info *sbi = data;
1681 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1682 	wait_queue_head_t *q = &dcc->discard_wait_queue;
1683 	struct discard_policy dpolicy;
1684 	unsigned int wait_ms = dcc->min_discard_issue_time;
1685 	int issued;
1686 
1687 	set_freezable();
1688 
1689 	do {
1690 		wait_event_interruptible_timeout(*q,
1691 				kthread_should_stop() || freezing(current) ||
1692 				dcc->discard_wake,
1693 				msecs_to_jiffies(wait_ms));
1694 
1695 		if (sbi->gc_mode == GC_URGENT_HIGH ||
1696 			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
1697 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
1698 		else
1699 			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1700 						dcc->discard_granularity);
1701 
1702 		if (dcc->discard_wake)
1703 			dcc->discard_wake = 0;
1704 
1705 		/* clean up pending candidates before going to sleep */
1706 		if (atomic_read(&dcc->queued_discard))
1707 			__wait_all_discard_cmd(sbi, NULL);
1708 
1709 		if (try_to_freeze())
1710 			continue;
1711 		if (f2fs_readonly(sbi->sb))
1712 			continue;
1713 		if (kthread_should_stop())
1714 			return 0;
1715 		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1716 			!atomic_read(&dcc->discard_cmd_cnt)) {
1717 			wait_ms = dpolicy.max_interval;
1718 			continue;
1719 		}
1720 
1721 		sb_start_intwrite(sbi->sb);
1722 
1723 		issued = __issue_discard_cmd(sbi, &dpolicy);
1724 		if (issued > 0) {
1725 			__wait_all_discard_cmd(sbi, &dpolicy);
1726 			wait_ms = dpolicy.min_interval;
1727 		} else if (issued == -1) {
1728 			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1729 			if (!wait_ms)
1730 				wait_ms = dpolicy.mid_interval;
1731 		} else {
1732 			wait_ms = dpolicy.max_interval;
1733 		}
1734 		if (!atomic_read(&dcc->discard_cmd_cnt))
1735 			wait_ms = dpolicy.max_interval;
1736 
1737 		sb_end_intwrite(sbi->sb);
1738 
1739 	} while (!kthread_should_stop());
1740 	return 0;
1741 }
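/*
 * Adaptive sleep summary: the thread sleeps min_interval after a
 * productive round, mid_interval (or the remaining idle window) after an
 * IO-aware interruption, and max_interval when there was nothing to issue.
 */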
1742 
1743 #ifdef CONFIG_BLK_DEV_ZONED
1744 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1745 		struct block_device *bdev, block_t blkstart, block_t blklen)
1746 {
1747 	sector_t sector, nr_sects;
1748 	block_t lblkstart = blkstart;
1749 	int devi = 0;
1750 
1751 	if (f2fs_is_multi_device(sbi)) {
1752 		devi = f2fs_target_device_index(sbi, blkstart);
1753 		if (blkstart < FDEV(devi).start_blk ||
1754 		    blkstart > FDEV(devi).end_blk) {
1755 			f2fs_err(sbi, "Invalid block %x", blkstart);
1756 			return -EIO;
1757 		}
1758 		blkstart -= FDEV(devi).start_blk;
1759 	}
1760 
1761 	/* For sequential zones, reset the zone write pointer */
1762 	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1763 		sector = SECTOR_FROM_BLOCK(blkstart);
1764 		nr_sects = SECTOR_FROM_BLOCK(blklen);
1765 
1766 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
1767 				nr_sects != bdev_zone_sectors(bdev)) {
1768 			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1769 				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1770 				 blkstart, blklen);
1771 			return -EIO;
1772 		}
1773 		trace_f2fs_issue_reset_zone(bdev, blkstart);
1774 		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1775 					sector, nr_sects, GFP_NOFS);
1776 	}
1777 
1778 	/* For conventional zones, use regular discard if supported */
1779 	__queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1780 	return 0;
1781 }
1782 #endif
1783 
1784 static int __issue_discard_async(struct f2fs_sb_info *sbi,
1785 		struct block_device *bdev, block_t blkstart, block_t blklen)
1786 {
1787 #ifdef CONFIG_BLK_DEV_ZONED
1788 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
1789 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1790 #endif
1791 	__queue_discard_cmd(sbi, bdev, blkstart, blklen);
1792 	return 0;
1793 }
1794 
1795 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
1796 				block_t blkstart, block_t blklen)
1797 {
1798 	sector_t start = blkstart, len = 0;
1799 	struct block_device *bdev;
1800 	struct seg_entry *se;
1801 	unsigned int offset;
1802 	block_t i;
1803 	int err = 0;
1804 
1805 	bdev = f2fs_target_device(sbi, blkstart, NULL);
1806 
1807 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
1808 		if (i != start) {
1809 			struct block_device *bdev2 =
1810 				f2fs_target_device(sbi, i, NULL);
1811 
1812 			if (bdev2 != bdev) {
1813 				err = __issue_discard_async(sbi, bdev,
1814 						start, len);
1815 				if (err)
1816 					return err;
1817 				bdev = bdev2;
1818 				start = i;
1819 				len = 0;
1820 			}
1821 		}
1822 
1823 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
1824 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
1825 
1826 		if (f2fs_block_unit_discard(sbi) &&
1827 				!f2fs_test_and_set_bit(offset, se->discard_map))
1828 			sbi->discard_blks--;
1829 	}
1830 
1831 	if (len)
1832 		err = __issue_discard_async(sbi, bdev, start, len);
1833 	return err;
1834 }
1835 
1836 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1837 							bool check_only)
1838 {
1839 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1840 	int max_blocks = sbi->blocks_per_seg;
1841 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
1842 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1843 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1844 	unsigned long *discard_map = (unsigned long *)se->discard_map;
1845 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
1846 	unsigned int start = 0, end = -1;
1847 	bool force = (cpc->reason & CP_DISCARD);
1848 	struct discard_entry *de = NULL;
1849 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
1850 	int i;
1851 
1852 	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi) ||
1853 			!f2fs_block_unit_discard(sbi))
1854 		return false;
1855 
1856 	if (!force) {
1857 		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
1858 			SM_I(sbi)->dcc_info->nr_discards >=
1859 				SM_I(sbi)->dcc_info->max_discards)
1860 			return false;
1861 	}
1862 
1863 	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1864 	for (i = 0; i < entries; i++)
1865 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
1866 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
1867 
1868 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
1869 				SM_I(sbi)->dcc_info->max_discards) {
1870 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1871 		if (start >= max_blocks)
1872 			break;
1873 
1874 		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
1875 		if (force && start && end != max_blocks
1876 					&& (end - start) < cpc->trim_minlen)
1877 			continue;
1878 
1879 		if (check_only)
1880 			return true;
1881 
1882 		if (!de) {
1883 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
1884 						GFP_F2FS_ZERO, true, NULL);
1885 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1886 			list_add_tail(&de->list, head);
1887 		}
1888 
1889 		for (i = start; i < end; i++)
1890 			__set_bit_le(i, (void *)de->discard_map);
1891 
1892 		SM_I(sbi)->dcc_info->nr_discards += end - start;
1893 	}
1894 	return false;
1895 }
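/*
 * Bitmap arithmetic above, in words: in CP_DISCARD mode (force), dmap
 * marks blocks that are invalid in the checkpoint and not yet discarded
 * (~ckpt_map & ~discard_map); otherwise it marks blocks that just became
 * invalid in this checkpoint interval ((cur_map ^ ckpt_map) & ckpt_map),
 * e.g. cur 0b0011, ckpt 0b0110 yields 0b0100: valid at the last
 * checkpoint, freed since.
 */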
1896 
1897 static void release_discard_addr(struct discard_entry *entry)
1898 {
1899 	list_del(&entry->list);
1900 	kmem_cache_free(discard_entry_slab, entry);
1901 }
1902 
1903 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
1904 {
1905 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
1906 	struct discard_entry *entry, *this;
1907 
1908 	/* drop caches */
1909 	list_for_each_entry_safe(entry, this, head, list)
1910 		release_discard_addr(entry);
1911 }
1912 
1913 /*
1914  * f2fs_clear_prefree_segments() should be called after the checkpoint is done.
1915  */
1916 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
1917 {
1918 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1919 	unsigned int segno;
1920 
1921 	mutex_lock(&dirty_i->seglist_lock);
1922 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
1923 		__set_test_and_free(sbi, segno, false);
1924 	mutex_unlock(&dirty_i->seglist_lock);
1925 }
1926 
1927 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
1928 						struct cp_control *cpc)
1929 {
1930 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1931 	struct list_head *head = &dcc->entry_list;
1932 	struct discard_entry *entry, *this;
1933 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1934 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
1935 	unsigned int start = 0, end = -1;
1936 	unsigned int secno, start_segno;
1937 	bool force = (cpc->reason & CP_DISCARD);
1938 	bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
1939 						DISCARD_UNIT_SECTION;
1940 
1941 	if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
1942 		section_alignment = true;
1943 
1944 	mutex_lock(&dirty_i->seglist_lock);
1945 
1946 	while (1) {
1947 		int i;
1948 
1949 		if (section_alignment && end != -1)
1950 			end--;
1951 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
1952 		if (start >= MAIN_SEGS(sbi))
1953 			break;
1954 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
1955 								start + 1);
1956 
1957 		if (section_alignment) {
1958 			start = rounddown(start, sbi->segs_per_sec);
1959 			end = roundup(end, sbi->segs_per_sec);
1960 		}
1961 
1962 		for (i = start; i < end; i++) {
1963 			if (test_and_clear_bit(i, prefree_map))
1964 				dirty_i->nr_dirty[PRE]--;
1965 		}
1966 
1967 		if (!f2fs_realtime_discard_enable(sbi))
1968 			continue;
1969 
1970 		if (force && start >= cpc->trim_start &&
1971 					(end - 1) <= cpc->trim_end)
1972 				continue;
1973 
1974 		if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
1975 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
1976 				(end - start) << sbi->log_blocks_per_seg);
1977 			continue;
1978 		}
1979 next:
1980 		secno = GET_SEC_FROM_SEG(sbi, start);
1981 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
1982 		if (!IS_CURSEC(sbi, secno) &&
1983 			!get_valid_blocks(sbi, start, true))
1984 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
1985 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
1986 
1987 		start = start_segno + sbi->segs_per_sec;
1988 		if (start < end)
1989 			goto next;
1990 		else
1991 			end = start - 1;
1992 	}
1993 	mutex_unlock(&dirty_i->seglist_lock);
1994 
1995 	if (!f2fs_block_unit_discard(sbi))
1996 		goto wakeup;
1997 
1998 	/* send small discards */
1999 	list_for_each_entry_safe(entry, this, head, list) {
2000 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2001 		bool is_valid = test_bit_le(0, entry->discard_map);
2002 
2003 find_next:
2004 		if (is_valid) {
2005 			next_pos = find_next_zero_bit_le(entry->discard_map,
2006 					sbi->blocks_per_seg, cur_pos);
2007 			len = next_pos - cur_pos;
2008 
2009 			if (f2fs_sb_has_blkzoned(sbi) ||
2010 			    (force && len < cpc->trim_minlen))
2011 				goto skip;
2012 
2013 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2014 									len);
2015 			total_len += len;
2016 		} else {
2017 			next_pos = find_next_bit_le(entry->discard_map,
2018 					sbi->blocks_per_seg, cur_pos);
2019 		}
2020 skip:
2021 		cur_pos = next_pos;
2022 		is_valid = !is_valid;
2023 
2024 		if (cur_pos < sbi->blocks_per_seg)
2025 			goto find_next;
2026 
2027 		release_discard_addr(entry);
2028 		dcc->nr_discards -= total_len;
2029 	}
2030 
2031 wakeup:
2032 	wake_up_discard_thread(sbi, false);
2033 }
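
/*
 * Illustrative walk of the "send small discards" loop above: for a
 * per-segment discard_map beginning 1110011..., cur_pos starts at 0
 * with is_valid = true, find_next_zero_bit_le() returns 3, and blocks
 * 0..2 are issued as one discard (len = 3); is_valid then flips and
 * find_next_bit_le() skips the clear run 3..4; the next round issues
 * the run starting at bit 5, and so on until cur_pos reaches
 * blocks_per_seg.
 */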
2034 
2035 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2036 {
2037 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2038 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2039 	int err = 0;
2040 
2041 	if (!f2fs_realtime_discard_enable(sbi))
2042 		return 0;
2043 
2044 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2045 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2046 	if (IS_ERR(dcc->f2fs_issue_discard)) {
2047 		err = PTR_ERR(dcc->f2fs_issue_discard);
2048 		dcc->f2fs_issue_discard = NULL;
2049 	}
2050 
2051 	return err;
2052 }
2053 
2054 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2055 {
2056 	struct discard_cmd_control *dcc;
2057 	int err = 0, i;
2058 
2059 	if (SM_I(sbi)->dcc_info) {
2060 		dcc = SM_I(sbi)->dcc_info;
2061 		goto init_thread;
2062 	}
2063 
2064 	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2065 	if (!dcc)
2066 		return -ENOMEM;
2067 
2068 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2069 	dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
2070 	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2071 		dcc->discard_granularity = sbi->blocks_per_seg;
2072 	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2073 		dcc->discard_granularity = BLKS_PER_SEC(sbi);
2074 
2075 	INIT_LIST_HEAD(&dcc->entry_list);
2076 	for (i = 0; i < MAX_PLIST_NUM; i++)
2077 		INIT_LIST_HEAD(&dcc->pend_list[i]);
2078 	INIT_LIST_HEAD(&dcc->wait_list);
2079 	INIT_LIST_HEAD(&dcc->fstrim_list);
2080 	mutex_init(&dcc->cmd_lock);
2081 	atomic_set(&dcc->issued_discard, 0);
2082 	atomic_set(&dcc->queued_discard, 0);
2083 	atomic_set(&dcc->discard_cmd_cnt, 0);
2084 	dcc->nr_discards = 0;
2085 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2086 	dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
2087 	dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
2088 	dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
2089 	dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
2090 	dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
2091 	dcc->undiscard_blks = 0;
2092 	dcc->next_pos = 0;
2093 	dcc->root = RB_ROOT_CACHED;
2094 	dcc->rbtree_check = false;
2095 
2096 	init_waitqueue_head(&dcc->discard_wait_queue);
2097 	SM_I(sbi)->dcc_info = dcc;
2098 init_thread:
2099 	err = f2fs_start_discard_thread(sbi);
2100 	if (err) {
2101 		kfree(dcc);
2102 		SM_I(sbi)->dcc_info = NULL;
2103 	}
2104 
2105 	return err;
2106 }
2107 
2108 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2109 {
2110 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2111 
2112 	if (!dcc)
2113 		return;
2114 
2115 	f2fs_stop_discard_thread(sbi);
2116 
2117 	/*
2118 	 * Recovery can cache discard commands, so the error path of
2119 	 * fill_super() needs a chance to handle them.
2120 	 */
2121 	f2fs_issue_discard_timeout(sbi);
2122 
2123 	kfree(dcc);
2124 	SM_I(sbi)->dcc_info = NULL;
2125 }
2126 
2127 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2128 {
2129 	struct sit_info *sit_i = SIT_I(sbi);
2130 
2131 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2132 		sit_i->dirty_sentries++;
2133 		return false;
2134 	}
2135 
2136 	return true;
2137 }
2138 
2139 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2140 					unsigned int segno, int modified)
2141 {
2142 	struct seg_entry *se = get_seg_entry(sbi, segno);
2143 
2144 	se->type = type;
2145 	if (modified)
2146 		__mark_sit_entry_dirty(sbi, segno);
2147 }
2148 
2149 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2150 								block_t blkaddr)
2151 {
2152 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2153 
2154 	if (segno == NULL_SEGNO)
2155 		return 0;
2156 	return get_seg_entry(sbi, segno)->mtime;
2157 }
2158 
2159 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2160 						unsigned long long old_mtime)
2161 {
2162 	struct seg_entry *se;
2163 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2164 	unsigned long long ctime = get_mtime(sbi, false);
2165 	unsigned long long mtime = old_mtime ? old_mtime : ctime;
2166 
2167 	if (segno == NULL_SEGNO)
2168 		return;
2169 
2170 	se = get_seg_entry(sbi, segno);
2171 
2172 	if (!se->mtime)
2173 		se->mtime = mtime;
2174 	else
2175 		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2176 						se->valid_blocks + 1);
2177 
2178 	if (ctime > SIT_I(sbi)->max_mtime)
2179 		SIT_I(sbi)->max_mtime = ctime;
2180 }
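
/*
 * Numerical sketch of the running average above: with valid_blocks = 3
 * and se->mtime = 100, folding in a block whose mtime is 200 yields
 * (100 * 3 + 200) / (3 + 1) = 125, so a segment's mtime drifts toward
 * the age of its most recently written blocks one block at a time.
 */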
2181 
2182 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2183 {
2184 	struct seg_entry *se;
2185 	unsigned int segno, offset;
2186 	long int new_vblocks;
2187 	bool exist;
2188 #ifdef CONFIG_F2FS_CHECK_FS
2189 	bool mir_exist;
2190 #endif
2191 
2192 	segno = GET_SEGNO(sbi, blkaddr);
2193 
2194 	se = get_seg_entry(sbi, segno);
2195 	new_vblocks = se->valid_blocks + del;
2196 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2197 
2198 	f2fs_bug_on(sbi, (new_vblocks < 0 ||
2199 			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2200 
2201 	se->valid_blocks = new_vblocks;
2202 
2203 	/* Update valid block bitmap */
2204 	if (del > 0) {
2205 		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2206 #ifdef CONFIG_F2FS_CHECK_FS
2207 		mir_exist = f2fs_test_and_set_bit(offset,
2208 						se->cur_valid_map_mir);
2209 		if (unlikely(exist != mir_exist)) {
2210 			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2211 				 blkaddr, exist);
2212 			f2fs_bug_on(sbi, 1);
2213 		}
2214 #endif
2215 		if (unlikely(exist)) {
2216 			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2217 				 blkaddr);
2218 			f2fs_bug_on(sbi, 1);
2219 			se->valid_blocks--;
2220 			del = 0;
2221 		}
2222 
2223 		if (f2fs_block_unit_discard(sbi) &&
2224 				!f2fs_test_and_set_bit(offset, se->discard_map))
2225 			sbi->discard_blks--;
2226 
2227 		/*
2228 		 * SSR should never reuse a block which is checkpointed
2229 		 * or newly invalidated.
2230 		 */
2231 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2232 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2233 				se->ckpt_valid_blocks++;
2234 		}
2235 	} else {
2236 		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2237 #ifdef CONFIG_F2FS_CHECK_FS
2238 		mir_exist = f2fs_test_and_clear_bit(offset,
2239 						se->cur_valid_map_mir);
2240 		if (unlikely(exist != mir_exist)) {
2241 			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2242 				 blkaddr, exist);
2243 			f2fs_bug_on(sbi, 1);
2244 		}
2245 #endif
2246 		if (unlikely(!exist)) {
2247 			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2248 				 blkaddr);
2249 			f2fs_bug_on(sbi, 1);
2250 			se->valid_blocks++;
2251 			del = 0;
2252 		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2253 			/*
2254 			 * If checkpoints are off, we must not reuse data that
2255 			 * was used in the previous checkpoint. If it was used
2256 			 * before, we must track that to know how much space we
2257 			 * really have.
2258 			 */
2259 			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2260 				spin_lock(&sbi->stat_lock);
2261 				sbi->unusable_block_count++;
2262 				spin_unlock(&sbi->stat_lock);
2263 			}
2264 		}
2265 
2266 		if (f2fs_block_unit_discard(sbi) &&
2267 			f2fs_test_and_clear_bit(offset, se->discard_map))
2268 			sbi->discard_blks++;
2269 	}
2270 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2271 		se->ckpt_valid_blocks += del;
2272 
2273 	__mark_sit_entry_dirty(sbi, segno);
2274 
2275 	/* update total number of valid blocks to be written in ckpt area */
2276 	SIT_I(sbi)->written_valid_blocks += del;
2277 
2278 	if (__is_large_section(sbi))
2279 		get_sec_entry(sbi, segno)->valid_blocks += del;
2280 }
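
/*
 * Callers pass del = 1 when a block is newly allocated at @blkaddr and
 * del = -1 when it is invalidated; f2fs_allocate_data_block() below,
 * for instance, calls update_sit_entry(sbi, *new_blkaddr, 1) for the
 * new location and update_sit_entry(sbi, old_blkaddr, -1) for the
 * overwritten one, keeping valid_blocks, the bitmaps and
 * written_valid_blocks consistent.
 */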
2281 
2282 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2283 {
2284 	unsigned int segno = GET_SEGNO(sbi, addr);
2285 	struct sit_info *sit_i = SIT_I(sbi);
2286 
2287 	f2fs_bug_on(sbi, addr == NULL_ADDR);
2288 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2289 		return;
2290 
2291 	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
2292 	f2fs_invalidate_compress_page(sbi, addr);
2293 
2294 	/* add it into sit main buffer */
2295 	down_write(&sit_i->sentry_lock);
2296 
2297 	update_segment_mtime(sbi, addr, 0);
2298 	update_sit_entry(sbi, addr, -1);
2299 
2300 	/* add it into dirty seglist */
2301 	locate_dirty_segment(sbi, segno);
2302 
2303 	up_write(&sit_i->sentry_lock);
2304 }
2305 
2306 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2307 {
2308 	struct sit_info *sit_i = SIT_I(sbi);
2309 	unsigned int segno, offset;
2310 	struct seg_entry *se;
2311 	bool is_cp = false;
2312 
2313 	if (!__is_valid_data_blkaddr(blkaddr))
2314 		return true;
2315 
2316 	down_read(&sit_i->sentry_lock);
2317 
2318 	segno = GET_SEGNO(sbi, blkaddr);
2319 	se = get_seg_entry(sbi, segno);
2320 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2321 
2322 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2323 		is_cp = true;
2324 
2325 	up_read(&sit_i->sentry_lock);
2326 
2327 	return is_cp;
2328 }
2329 
2330 /*
2331  * This function must be called with curseg_mutex held.
2332  */
2333 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
2334 					struct f2fs_summary *sum)
2335 {
2336 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2337 	void *addr = curseg->sum_blk;
2338 
2339 	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
2340 	memcpy(addr, sum, sizeof(struct f2fs_summary));
2341 }
2342 
2343 /*
2344  * Calculate the number of current summary pages for writing
2345  */
2346 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2347 {
2348 	int valid_sum_count = 0;
2349 	int i, sum_in_page;
2350 
2351 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2352 		if (sbi->ckpt->alloc_type[i] == SSR)
2353 			valid_sum_count += sbi->blocks_per_seg;
2354 		else {
2355 			if (for_ra)
2356 				valid_sum_count += le16_to_cpu(
2357 					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2358 			else
2359 				valid_sum_count += curseg_blkoff(sbi, i);
2360 		}
2361 	}
2362 
2363 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
2364 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2365 	if (valid_sum_count <= sum_in_page)
2366 		return 1;
2367 	else if ((valid_sum_count - sum_in_page) <=
2368 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2369 		return 2;
2370 	return 3;
2371 }
2372 
2373 /*
2374  * Caller should put this summary page
2375  */
2376 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2377 {
2378 	if (unlikely(f2fs_cp_error(sbi)))
2379 		return ERR_PTR(-EIO);
2380 	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2381 }
2382 
2383 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2384 					void *src, block_t blk_addr)
2385 {
2386 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2387 
2388 	memcpy(page_address(page), src, PAGE_SIZE);
2389 	set_page_dirty(page);
2390 	f2fs_put_page(page, 1);
2391 }
2392 
2393 static void write_sum_page(struct f2fs_sb_info *sbi,
2394 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2395 {
2396 	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2397 }
2398 
2399 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2400 						int type, block_t blk_addr)
2401 {
2402 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2403 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2404 	struct f2fs_summary_block *src = curseg->sum_blk;
2405 	struct f2fs_summary_block *dst;
2406 
2407 	dst = (struct f2fs_summary_block *)page_address(page);
2408 	memset(dst, 0, PAGE_SIZE);
2409 
2410 	mutex_lock(&curseg->curseg_mutex);
2411 
2412 	down_read(&curseg->journal_rwsem);
2413 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2414 	up_read(&curseg->journal_rwsem);
2415 
2416 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2417 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2418 
2419 	mutex_unlock(&curseg->curseg_mutex);
2420 
2421 	set_page_dirty(page);
2422 	f2fs_put_page(page, 1);
2423 }
2424 
2425 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2426 				struct curseg_info *curseg, int type)
2427 {
2428 	unsigned int segno = curseg->segno + 1;
2429 	struct free_segmap_info *free_i = FREE_I(sbi);
2430 
2431 	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2432 		return !test_bit(segno, free_i->free_segmap);
2433 	return 0;
2434 }
2435 
2436 /*
2437  * Find a new segment in the free segment bitmap, honoring the allocation
2438  * direction. This function must succeed; otherwise it is a BUG.
2439  */
2440 static void get_new_segment(struct f2fs_sb_info *sbi,
2441 			unsigned int *newseg, bool new_sec, int dir)
2442 {
2443 	struct free_segmap_info *free_i = FREE_I(sbi);
2444 	unsigned int segno, secno, zoneno;
2445 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2446 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2447 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2448 	unsigned int left_start = hint;
2449 	bool init = true;
2450 	int go_left = 0;
2451 	int i;
2452 
2453 	spin_lock(&free_i->segmap_lock);
2454 
2455 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2456 		segno = find_next_zero_bit(free_i->free_segmap,
2457 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2458 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2459 			goto got_it;
2460 	}
2461 find_other_zone:
2462 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2463 	if (secno >= MAIN_SECS(sbi)) {
2464 		if (dir == ALLOC_RIGHT) {
2465 			secno = find_first_zero_bit(free_i->free_secmap,
2466 							MAIN_SECS(sbi));
2467 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2468 		} else {
2469 			go_left = 1;
2470 			left_start = hint - 1;
2471 		}
2472 	}
2473 	if (go_left == 0)
2474 		goto skip_left;
2475 
2476 	while (test_bit(left_start, free_i->free_secmap)) {
2477 		if (left_start > 0) {
2478 			left_start--;
2479 			continue;
2480 		}
2481 		left_start = find_first_zero_bit(free_i->free_secmap,
2482 							MAIN_SECS(sbi));
2483 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2484 		break;
2485 	}
2486 	secno = left_start;
2487 skip_left:
2488 	segno = GET_SEG_FROM_SEC(sbi, secno);
2489 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2490 
2491 	/* give up on finding another zone */
2492 	if (!init)
2493 		goto got_it;
2494 	if (sbi->secs_per_zone == 1)
2495 		goto got_it;
2496 	if (zoneno == old_zoneno)
2497 		goto got_it;
2498 	if (dir == ALLOC_LEFT) {
2499 		if (!go_left && zoneno + 1 >= total_zones)
2500 			goto got_it;
2501 		if (go_left && zoneno == 0)
2502 			goto got_it;
2503 	}
2504 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2505 		if (CURSEG_I(sbi, i)->zone == zoneno)
2506 			break;
2507 
2508 	if (i < NR_CURSEG_TYPE) {
2509 		/* zone is in use, try another */
2510 		if (go_left)
2511 			hint = zoneno * sbi->secs_per_zone - 1;
2512 		else if (zoneno + 1 >= total_zones)
2513 			hint = 0;
2514 		else
2515 			hint = (zoneno + 1) * sbi->secs_per_zone;
2516 		init = false;
2517 		goto find_other_zone;
2518 	}
2519 got_it:
2520 	/* set it as dirty segment in free segmap */
2521 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2522 	__set_inuse(sbi, segno);
2523 	*newseg = segno;
2524 	spin_unlock(&free_i->segmap_lock);
2525 }
2526 
2527 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2528 {
2529 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2530 	struct summary_footer *sum_footer;
2531 	unsigned short seg_type = curseg->seg_type;
2532 
2533 	curseg->inited = true;
2534 	curseg->segno = curseg->next_segno;
2535 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2536 	curseg->next_blkoff = 0;
2537 	curseg->next_segno = NULL_SEGNO;
2538 
2539 	sum_footer = &(curseg->sum_blk->footer);
2540 	memset(sum_footer, 0, sizeof(struct summary_footer));
2541 
2542 	sanity_check_seg_type(sbi, seg_type);
2543 
2544 	if (IS_DATASEG(seg_type))
2545 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2546 	if (IS_NODESEG(seg_type))
2547 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2548 	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2549 }
2550 
2551 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2552 {
2553 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2554 	unsigned short seg_type = curseg->seg_type;
2555 
2556 	sanity_check_seg_type(sbi, seg_type);
2557 	if (f2fs_need_rand_seg(sbi))
2558 		return get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
2559 
2560 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
2561 	if (__is_large_section(sbi))
2562 		return curseg->segno;
2563 
2564 	/* an inmem log may not be located on any segment after mount */
2565 	if (!curseg->inited)
2566 		return 0;
2567 
2568 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2569 		return 0;
2570 
2571 	if (test_opt(sbi, NOHEAP) &&
2572 		(seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
2573 		return 0;
2574 
2575 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2576 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2577 
2578 	/* find segments from 0 to reuse freed segments */
2579 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2580 		return 0;
2581 
2582 	return curseg->segno;
2583 }
2584 
2585 /*
2586  * Allocate a current working segment.
2587  * This function always allocates a free segment in LFS manner.
2588  */
2589 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2590 {
2591 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2592 	unsigned short seg_type = curseg->seg_type;
2593 	unsigned int segno = curseg->segno;
2594 	int dir = ALLOC_LEFT;
2595 
2596 	if (curseg->inited)
2597 		write_sum_page(sbi, curseg->sum_blk,
2598 				GET_SUM_BLOCK(sbi, segno));
2599 	if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
2600 		dir = ALLOC_RIGHT;
2601 
2602 	if (test_opt(sbi, NOHEAP))
2603 		dir = ALLOC_RIGHT;
2604 
2605 	segno = __get_next_segno(sbi, type);
2606 	get_new_segment(sbi, &segno, new_sec, dir);
2607 	curseg->next_segno = segno;
2608 	reset_curseg(sbi, type, 1);
2609 	curseg->alloc_type = LFS;
2610 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2611 		curseg->fragment_remained_chunk =
2612 				get_random_u32_inclusive(1, sbi->max_fragment_chunk);
2613 }
2614 
2615 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2616 					int segno, block_t start)
2617 {
2618 	struct seg_entry *se = get_seg_entry(sbi, segno);
2619 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2620 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2621 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2622 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2623 	int i;
2624 
2625 	for (i = 0; i < entries; i++)
2626 		target_map[i] = ckpt_map[i] | cur_map[i];
2627 
2628 	return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2629 }
2630 
2631 /*
2632  * If a segment is written in LFS manner, the next block offset is simply
2633  * obtained by incrementing the current block offset. However, if a segment is
2634  * written in SSR manner, it is obtained by calling __next_free_blkoff().
2635  */
2636 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2637 				struct curseg_info *seg)
2638 {
2639 	if (seg->alloc_type == SSR) {
2640 		seg->next_blkoff =
2641 			__next_free_blkoff(sbi, seg->segno,
2642 						seg->next_blkoff + 1);
2643 	} else {
2644 		seg->next_blkoff++;
2645 		if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) {
2646 			/* To allocate block chunks in different sizes, use random numbers */
2647 			if (--seg->fragment_remained_chunk <= 0) {
2648 				seg->fragment_remained_chunk =
2649 				   get_random_u32_inclusive(1, sbi->max_fragment_chunk);
2650 				seg->next_blkoff +=
2651 				   get_random_u32_inclusive(1, sbi->max_fragment_hole);
2652 			}
2653 		}
2654 	}
2655 }
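
/*
 * SSR example for __next_free_blkoff(), showing one byte of each map
 * in f2fs's MSB-first bit order (illustrative values):
 *
 *	ckpt_valid_map = 1010 0000
 *	cur_valid_map  = 0100 0000
 *	target_map     = 1110 0000	(ckpt | cur)
 *
 * a search from offset 0 returns 3, the first slot that is free both
 * now and in the checkpointed image, so SSR never lands on a block
 * that a rollback to the last checkpoint would still need.
 */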
2656 
2657 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2658 {
2659 	return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
2660 }
2661 
2662 /*
2663  * This function always allocates a used segment (from the dirty seglist) in
2664  * SSR manner, so it must recover the existing segment information of valid blocks.
2665  */
2666 static void change_curseg(struct f2fs_sb_info *sbi, int type)
2667 {
2668 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2669 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2670 	unsigned int new_segno = curseg->next_segno;
2671 	struct f2fs_summary_block *sum_node;
2672 	struct page *sum_page;
2673 
2674 	write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
2675 
2676 	__set_test_and_inuse(sbi, new_segno);
2677 
2678 	mutex_lock(&dirty_i->seglist_lock);
2679 	__remove_dirty_segment(sbi, new_segno, PRE);
2680 	__remove_dirty_segment(sbi, new_segno, DIRTY);
2681 	mutex_unlock(&dirty_i->seglist_lock);
2682 
2683 	reset_curseg(sbi, type, 1);
2684 	curseg->alloc_type = SSR;
2685 	curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2686 
2687 	sum_page = f2fs_get_sum_page(sbi, new_segno);
2688 	if (IS_ERR(sum_page)) {
2689 		/* GC won't be able to use stale summary pages by cp_error */
2690 		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2691 		return;
2692 	}
2693 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2694 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2695 	f2fs_put_page(sum_page, 1);
2696 }
2697 
2698 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2699 				int alloc_mode, unsigned long long age);
2700 
2701 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2702 					int target_type, int alloc_mode,
2703 					unsigned long long age)
2704 {
2705 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2706 
2707 	curseg->seg_type = target_type;
2708 
2709 	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2710 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2711 
2712 		curseg->seg_type = se->type;
2713 		change_curseg(sbi, type);
2714 	} else {
2715 		/* allocate cold segment by default */
2716 		curseg->seg_type = CURSEG_COLD_DATA;
2717 		new_curseg(sbi, type, true);
2718 	}
2719 	stat_inc_seg_type(sbi, curseg);
2720 }
2721 
2722 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2723 {
2724 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2725 
2726 	if (!sbi->am.atgc_enabled)
2727 		return;
2728 
2729 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2730 
2731 	mutex_lock(&curseg->curseg_mutex);
2732 	down_write(&SIT_I(sbi)->sentry_lock);
2733 
2734 	get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2735 
2736 	up_write(&SIT_I(sbi)->sentry_lock);
2737 	mutex_unlock(&curseg->curseg_mutex);
2738 
2739 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
2740 }
2741 
2742 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2743 {
2744 	__f2fs_init_atgc_curseg(sbi);
2745 }
2746 
2747 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2748 {
2749 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2750 
2751 	mutex_lock(&curseg->curseg_mutex);
2752 	if (!curseg->inited)
2753 		goto out;
2754 
2755 	if (get_valid_blocks(sbi, curseg->segno, false)) {
2756 		write_sum_page(sbi, curseg->sum_blk,
2757 				GET_SUM_BLOCK(sbi, curseg->segno));
2758 	} else {
2759 		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2760 		__set_test_and_free(sbi, curseg->segno, true);
2761 		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2762 	}
2763 out:
2764 	mutex_unlock(&curseg->curseg_mutex);
2765 }
2766 
2767 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2768 {
2769 	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2770 
2771 	if (sbi->am.atgc_enabled)
2772 		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2773 }
2774 
2775 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2776 {
2777 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2778 
2779 	mutex_lock(&curseg->curseg_mutex);
2780 	if (!curseg->inited)
2781 		goto out;
2782 	if (get_valid_blocks(sbi, curseg->segno, false))
2783 		goto out;
2784 
2785 	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2786 	__set_test_and_inuse(sbi, curseg->segno);
2787 	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2788 out:
2789 	mutex_unlock(&curseg->curseg_mutex);
2790 }
2791 
2792 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2793 {
2794 	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2795 
2796 	if (sbi->am.atgc_enabled)
2797 		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2798 }
2799 
2800 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2801 				int alloc_mode, unsigned long long age)
2802 {
2803 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2804 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2805 	unsigned segno = NULL_SEGNO;
2806 	unsigned short seg_type = curseg->seg_type;
2807 	int i, cnt;
2808 	bool reversed = false;
2809 
2810 	sanity_check_seg_type(sbi, seg_type);
2811 
2812 	/* f2fs_need_SSR() already forces to do this */
2813 	if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
2814 		curseg->next_segno = segno;
2815 		return 1;
2816 	}
2817 
2818 	/* For node segments, let's do SSR more intensively */
2819 	if (IS_NODESEG(seg_type)) {
2820 		if (seg_type >= CURSEG_WARM_NODE) {
2821 			reversed = true;
2822 			i = CURSEG_COLD_NODE;
2823 		} else {
2824 			i = CURSEG_HOT_NODE;
2825 		}
2826 		cnt = NR_CURSEG_NODE_TYPE;
2827 	} else {
2828 		if (seg_type >= CURSEG_WARM_DATA) {
2829 			reversed = true;
2830 			i = CURSEG_COLD_DATA;
2831 		} else {
2832 			i = CURSEG_HOT_DATA;
2833 		}
2834 		cnt = NR_CURSEG_DATA_TYPE;
2835 	}
2836 
2837 	for (; cnt-- > 0; reversed ? i-- : i++) {
2838 		if (i == seg_type)
2839 			continue;
2840 		if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
2841 			curseg->next_segno = segno;
2842 			return 1;
2843 		}
2844 	}
2845 
2846 	/* find valid_blocks=0 in dirty list */
2847 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2848 		segno = get_free_segment(sbi);
2849 		if (segno != NULL_SEGNO) {
2850 			curseg->next_segno = segno;
2851 			return 1;
2852 		}
2853 	}
2854 	return 0;
2855 }
2856 
2857 static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
2858 {
2859 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2860 
2861 	if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2862 	    curseg->seg_type == CURSEG_WARM_NODE)
2863 		return true;
2864 	if (curseg->alloc_type == LFS &&
2865 	    is_next_segment_free(sbi, curseg, type) &&
2866 	    likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2867 		return true;
2868 	if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
2869 		return true;
2870 	return false;
2871 }
2872 
2873 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
2874 					unsigned int start, unsigned int end)
2875 {
2876 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2877 	unsigned int segno;
2878 
2879 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2880 	mutex_lock(&curseg->curseg_mutex);
2881 	down_write(&SIT_I(sbi)->sentry_lock);
2882 
2883 	segno = CURSEG_I(sbi, type)->segno;
2884 	if (segno < start || segno > end)
2885 		goto unlock;
2886 
2887 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
2888 		change_curseg(sbi, type);
2889 	else
2890 		new_curseg(sbi, type, true);
2891 
2892 	stat_inc_seg_type(sbi, curseg);
2893 
2894 	locate_dirty_segment(sbi, segno);
2895 unlock:
2896 	up_write(&SIT_I(sbi)->sentry_lock);
2897 
2898 	if (segno != curseg->segno)
2899 		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
2900 			    type, segno, curseg->segno);
2901 
2902 	mutex_unlock(&curseg->curseg_mutex);
2903 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
2904 }
2905 
2906 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
2907 						bool new_sec, bool force)
2908 {
2909 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2910 	unsigned int old_segno;
2911 
2912 	if (!curseg->inited)
2913 		goto alloc;
2914 
2915 	if (force || curseg->next_blkoff ||
2916 		get_valid_blocks(sbi, curseg->segno, new_sec))
2917 		goto alloc;
2918 
2919 	if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
2920 		return;
2921 alloc:
2922 	old_segno = curseg->segno;
2923 	new_curseg(sbi, type, true);
2924 	stat_inc_seg_type(sbi, curseg);
2925 	locate_dirty_segment(sbi, old_segno);
2926 }
2927 
2928 static void __allocate_new_section(struct f2fs_sb_info *sbi,
2929 						int type, bool force)
2930 {
2931 	__allocate_new_segment(sbi, type, true, force);
2932 }
2933 
2934 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
2935 {
2936 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2937 	down_write(&SIT_I(sbi)->sentry_lock);
2938 	__allocate_new_section(sbi, type, force);
2939 	up_write(&SIT_I(sbi)->sentry_lock);
2940 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
2941 }
2942 
2943 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
2944 {
2945 	int i;
2946 
2947 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2948 	down_write(&SIT_I(sbi)->sentry_lock);
2949 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
2950 		__allocate_new_segment(sbi, i, false, false);
2951 	up_write(&SIT_I(sbi)->sentry_lock);
2952 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
2953 }
2954 
2955 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
2956 						struct cp_control *cpc)
2957 {
2958 	__u64 trim_start = cpc->trim_start;
2959 	bool has_candidate = false;
2960 
2961 	down_write(&SIT_I(sbi)->sentry_lock);
2962 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2963 		if (add_discard_addrs(sbi, cpc, true)) {
2964 			has_candidate = true;
2965 			break;
2966 		}
2967 	}
2968 	up_write(&SIT_I(sbi)->sentry_lock);
2969 
2970 	cpc->trim_start = trim_start;
2971 	return has_candidate;
2972 }
2973 
2974 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
2975 					struct discard_policy *dpolicy,
2976 					unsigned int start, unsigned int end)
2977 {
2978 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2979 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
2980 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
2981 	struct discard_cmd *dc;
2982 	struct blk_plug plug;
2983 	int issued;
2984 	unsigned int trimmed = 0;
2985 
2986 next:
2987 	issued = 0;
2988 
2989 	mutex_lock(&dcc->cmd_lock);
2990 	if (unlikely(dcc->rbtree_check))
2991 		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
2992 							&dcc->root, false));
2993 
2994 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
2995 					NULL, start,
2996 					(struct rb_entry **)&prev_dc,
2997 					(struct rb_entry **)&next_dc,
2998 					&insert_p, &insert_parent, true, NULL);
2999 	if (!dc)
3000 		dc = next_dc;
3001 
3002 	blk_start_plug(&plug);
3003 
3004 	while (dc && dc->lstart <= end) {
3005 		struct rb_node *node;
3006 		int err = 0;
3007 
3008 		if (dc->len < dpolicy->granularity)
3009 			goto skip;
3010 
3011 		if (dc->state != D_PREP) {
3012 			list_move_tail(&dc->list, &dcc->fstrim_list);
3013 			goto skip;
3014 		}
3015 
3016 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3017 
3018 		if (issued >= dpolicy->max_requests) {
3019 			start = dc->lstart + dc->len;
3020 
3021 			if (err)
3022 				__remove_discard_cmd(sbi, dc);
3023 
3024 			blk_finish_plug(&plug);
3025 			mutex_unlock(&dcc->cmd_lock);
3026 			trimmed += __wait_all_discard_cmd(sbi, NULL);
3027 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
3028 			goto next;
3029 		}
3030 skip:
3031 		node = rb_next(&dc->rb_node);
3032 		if (err)
3033 			__remove_discard_cmd(sbi, dc);
3034 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3035 
3036 		if (fatal_signal_pending(current))
3037 			break;
3038 	}
3039 
3040 	blk_finish_plug(&plug);
3041 	mutex_unlock(&dcc->cmd_lock);
3042 
3043 	return trimmed;
3044 }
3045 
3046 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3047 {
3048 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
3049 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3050 	unsigned int start_segno, end_segno;
3051 	block_t start_block, end_block;
3052 	struct cp_control cpc;
3053 	struct discard_policy dpolicy;
3054 	unsigned long long trimmed = 0;
3055 	int err = 0;
3056 	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3057 
3058 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3059 		return -EINVAL;
3060 
3061 	if (end < MAIN_BLKADDR(sbi))
3062 		goto out;
3063 
3064 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3065 		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3066 		return -EFSCORRUPTED;
3067 	}
3068 
3069 	/* start/end segment number in main_area */
3070 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3071 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3072 						GET_SEGNO(sbi, end);
3073 	if (need_align) {
3074 		start_segno = rounddown(start_segno, sbi->segs_per_sec);
3075 		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
3076 	}
3077 
3078 	cpc.reason = CP_DISCARD;
3079 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3080 	cpc.trim_start = start_segno;
3081 	cpc.trim_end = end_segno;
3082 
3083 	if (sbi->discard_blks == 0)
3084 		goto out;
3085 
3086 	f2fs_down_write(&sbi->gc_lock);
3087 	err = f2fs_write_checkpoint(sbi, &cpc);
3088 	f2fs_up_write(&sbi->gc_lock);
3089 	if (err)
3090 		goto out;
3091 
3092 	/*
3093 	 * We filed discard candidates, but we don't actually need to wait for
3094 	 * all of them, since they'll be issued at idle time along with the
3095 	 * runtime discard option. The user configuration evidently relies on
3096 	 * runtime discard or periodic fstrim rather than waiting here.
3097 	 */
3098 	if (f2fs_realtime_discard_enable(sbi))
3099 		goto out;
3100 
3101 	start_block = START_BLOCK(sbi, start_segno);
3102 	end_block = START_BLOCK(sbi, end_segno + 1);
3103 
3104 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3105 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3106 					start_block, end_block);
3107 
3108 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3109 					start_block, end_block);
3110 out:
3111 	if (!err)
3112 		range->len = F2FS_BLK_TO_BYTES(trimmed);
3113 	return err;
3114 }
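
/*
 * Userspace reaches f2fs_trim_fs() through the generic FITRIM ioctl;
 * a minimal sketch (illustrative only, error handling elided):
 *
 *	struct fstrim_range r = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,
 *		.minlen	= 0,
 *	};
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	ioctl(fd, FITRIM, &r);
 *
 * On return, r.len holds the number of bytes trimmed, which is how
 * fstrim(8) reports its result.
 */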
3115 
3116 static bool __has_curseg_space(struct f2fs_sb_info *sbi,
3117 					struct curseg_info *curseg)
3118 {
3119 	return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
3120 							curseg->segno);
3121 }
3122 
3123 int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3124 {
3125 	switch (hint) {
3126 	case WRITE_LIFE_SHORT:
3127 		return CURSEG_HOT_DATA;
3128 	case WRITE_LIFE_EXTREME:
3129 		return CURSEG_COLD_DATA;
3130 	default:
3131 		return CURSEG_WARM_DATA;
3132 	}
3133 }
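
/*
 * The rw_hint consumed above can be set per file from userspace with
 * fcntl(2); a minimal sketch (illustrative, F_SET_RW_HINT is available
 * since Linux 4.13):
 *
 *	uint64_t hint = RWH_WRITE_LIFE_SHORT;
 *	fcntl(fd, F_SET_RW_HINT, &hint);
 *
 * after which data written through fd can be steered to the hot data log.
 */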
3134 
3135 static int __get_segment_type_2(struct f2fs_io_info *fio)
3136 {
3137 	if (fio->type == DATA)
3138 		return CURSEG_HOT_DATA;
3139 	else
3140 		return CURSEG_HOT_NODE;
3141 }
3142 
3143 static int __get_segment_type_4(struct f2fs_io_info *fio)
3144 {
3145 	if (fio->type == DATA) {
3146 		struct inode *inode = fio->page->mapping->host;
3147 
3148 		if (S_ISDIR(inode->i_mode))
3149 			return CURSEG_HOT_DATA;
3150 		else
3151 			return CURSEG_COLD_DATA;
3152 	} else {
3153 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3154 			return CURSEG_WARM_NODE;
3155 		else
3156 			return CURSEG_COLD_NODE;
3157 	}
3158 }
3159 
3160 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
3161 {
3162 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3163 	struct extent_info ei = {};
3164 
3165 	if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
3166 		if (!ei.age)
3167 			return NO_CHECK_TYPE;
3168 		if (ei.age <= sbi->hot_data_age_threshold)
3169 			return CURSEG_HOT_DATA;
3170 		if (ei.age <= sbi->warm_data_age_threshold)
3171 			return CURSEG_WARM_DATA;
3172 		return CURSEG_COLD_DATA;
3173 	}
3174 	return NO_CHECK_TYPE;
3175 }
3176 
3177 static int __get_segment_type_6(struct f2fs_io_info *fio)
3178 {
3179 	if (fio->type == DATA) {
3180 		struct inode *inode = fio->page->mapping->host;
3181 		int type;
3182 
3183 		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3184 			return CURSEG_COLD_DATA_PINNED;
3185 
3186 		if (page_private_gcing(fio->page)) {
3187 			if (fio->sbi->am.atgc_enabled &&
3188 				(fio->io_type == FS_DATA_IO) &&
3189 				(fio->sbi->gc_mode != GC_URGENT_HIGH))
3190 				return CURSEG_ALL_DATA_ATGC;
3191 			else
3192 				return CURSEG_COLD_DATA;
3193 		}
3194 		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3195 			return CURSEG_COLD_DATA;
3196 
3197 		type = __get_age_segment_type(inode, fio->page->index);
3198 		if (type != NO_CHECK_TYPE)
3199 			return type;
3200 
3201 		if (file_is_hot(inode) ||
3202 				is_inode_flag_set(inode, FI_HOT_DATA) ||
3203 				f2fs_is_cow_file(inode))
3204 			return CURSEG_HOT_DATA;
3205 		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3206 	} else {
3207 		if (IS_DNODE(fio->page))
3208 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3209 						CURSEG_HOT_NODE;
3210 		return CURSEG_COLD_NODE;
3211 	}
3212 }
3213 
3214 static int __get_segment_type(struct f2fs_io_info *fio)
3215 {
3216 	int type = 0;
3217 
3218 	switch (F2FS_OPTION(fio->sbi).active_logs) {
3219 	case 2:
3220 		type = __get_segment_type_2(fio);
3221 		break;
3222 	case 4:
3223 		type = __get_segment_type_4(fio);
3224 		break;
3225 	case 6:
3226 		type = __get_segment_type_6(fio);
3227 		break;
3228 	default:
3229 		f2fs_bug_on(fio->sbi, true);
3230 	}
3231 
3232 	if (IS_HOT(type))
3233 		fio->temp = HOT;
3234 	else if (IS_WARM(type))
3235 		fio->temp = WARM;
3236 	else
3237 		fio->temp = COLD;
3238 	return type;
3239 }
3240 
3241 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3242 		block_t old_blkaddr, block_t *new_blkaddr,
3243 		struct f2fs_summary *sum, int type,
3244 		struct f2fs_io_info *fio)
3245 {
3246 	struct sit_info *sit_i = SIT_I(sbi);
3247 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3248 	unsigned long long old_mtime;
3249 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3250 	struct seg_entry *se = NULL;
3251 
3252 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3253 
3254 	mutex_lock(&curseg->curseg_mutex);
3255 	down_write(&sit_i->sentry_lock);
3256 
3257 	if (from_gc) {
3258 		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3259 		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3260 		sanity_check_seg_type(sbi, se->type);
3261 		f2fs_bug_on(sbi, IS_NODESEG(se->type));
3262 	}
3263 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3264 
3265 	f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
3266 
3267 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3268 
3269 	/*
3270 	 * __add_sum_entry must be called with curseg_mutex held,
3271 	 * because this function updates a summary entry in the
3272 	 * current summary block.
3273 	 */
3274 	__add_sum_entry(sbi, type, sum);
3275 
3276 	__refresh_next_blkoff(sbi, curseg);
3277 
3278 	stat_inc_block_count(sbi, curseg);
3279 
3280 	if (from_gc) {
3281 		old_mtime = get_segment_mtime(sbi, old_blkaddr);
3282 	} else {
3283 		update_segment_mtime(sbi, old_blkaddr, 0);
3284 		old_mtime = 0;
3285 	}
3286 	update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3287 
3288 	/*
3289 	 * SIT information should be updated before segment allocation,
3290 	 * since SSR needs latest valid block information.
3291 	 */
3292 	update_sit_entry(sbi, *new_blkaddr, 1);
3293 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3294 		update_sit_entry(sbi, old_blkaddr, -1);
3295 
3296 	if (!__has_curseg_space(sbi, curseg)) {
3297 		/*
3298 		 * Flush out the current segment and replace it with a new one.
3299 		 */
3300 		if (from_gc) {
3301 			get_atssr_segment(sbi, type, se->type,
3302 						AT_SSR, se->mtime);
3303 		} else {
3304 			if (need_new_seg(sbi, type))
3305 				new_curseg(sbi, type, false);
3306 			else
3307 				change_curseg(sbi, type);
3308 			stat_inc_seg_type(sbi, curseg);
3309 		}
3310 	}
3311 	/*
3312 	 * Segment dirty status should be updated after segment allocation,
3313 	 * so we only need to update the status once, after the previous
3314 	 * segment has been closed.
3315 	 */
3316 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3317 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3318 
3319 	if (IS_DATASEG(type))
3320 		atomic64_inc(&sbi->allocated_data_blocks);
3321 
3322 	up_write(&sit_i->sentry_lock);
3323 
3324 	if (page && IS_NODESEG(type)) {
3325 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3326 
3327 		f2fs_inode_chksum_set(sbi, page);
3328 	}
3329 
3330 	if (fio) {
3331 		struct f2fs_bio_info *io;
3332 
3333 		if (F2FS_IO_ALIGNED(sbi))
3334 			fio->retry = false;
3335 
3336 		INIT_LIST_HEAD(&fio->list);
3337 		fio->in_list = true;
3338 		io = sbi->write_io[fio->type] + fio->temp;
3339 		spin_lock(&io->io_lock);
3340 		list_add_tail(&fio->list, &io->io_list);
3341 		spin_unlock(&io->io_lock);
3342 	}
3343 
3344 	mutex_unlock(&curseg->curseg_mutex);
3345 
3346 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3347 }
3348 
3349 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3350 					block_t blkaddr, unsigned int blkcnt)
3351 {
3352 	if (!f2fs_is_multi_device(sbi))
3353 		return;
3354 
3355 	while (1) {
3356 		unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3357 		unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3358 
3359 		/* update device state for fsync */
3360 		f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3361 
3362 		/* update device state for checkpoint */
3363 		if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3364 			spin_lock(&sbi->dev_lock);
3365 			f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3366 			spin_unlock(&sbi->dev_lock);
3367 		}
3368 
3369 		if (blkcnt <= blks)
3370 			break;
3371 		blkcnt -= blks;
3372 		blkaddr += blks;
3373 	}
3374 }
3375 
3376 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3377 {
3378 	int type = __get_segment_type(fio);
3379 	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3380 
3381 	if (keep_order)
3382 		f2fs_down_read(&fio->sbi->io_order_lock);
3383 reallocate:
3384 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3385 			&fio->new_blkaddr, sum, type, fio);
3386 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
3387 		invalidate_mapping_pages(META_MAPPING(fio->sbi),
3388 					fio->old_blkaddr, fio->old_blkaddr);
3389 		f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
3390 	}
3391 
3392 	/* write out dirty page into bdev */
3393 	f2fs_submit_page_write(fio);
3394 	if (fio->retry) {
3395 		fio->old_blkaddr = fio->new_blkaddr;
3396 		goto reallocate;
3397 	}
3398 
3399 	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3400 
3401 	if (keep_order)
3402 		f2fs_up_read(&fio->sbi->io_order_lock);
3403 }
3404 
3405 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3406 					enum iostat_type io_type)
3407 {
3408 	struct f2fs_io_info fio = {
3409 		.sbi = sbi,
3410 		.type = META,
3411 		.temp = HOT,
3412 		.op = REQ_OP_WRITE,
3413 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3414 		.old_blkaddr = page->index,
3415 		.new_blkaddr = page->index,
3416 		.page = page,
3417 		.encrypted_page = NULL,
3418 		.in_list = false,
3419 	};
3420 
3421 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3422 		fio.op_flags &= ~REQ_META;
3423 
3424 	set_page_writeback(page);
3425 	ClearPageError(page);
3426 	f2fs_submit_page_write(&fio);
3427 
3428 	stat_inc_meta_count(sbi, page->index);
3429 	f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
3430 }
3431 
3432 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3433 {
3434 	struct f2fs_summary sum;
3435 
3436 	set_summary(&sum, nid, 0, 0);
3437 	do_write_page(&sum, fio);
3438 
3439 	f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
3440 }
3441 
3442 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3443 					struct f2fs_io_info *fio)
3444 {
3445 	struct f2fs_sb_info *sbi = fio->sbi;
3446 	struct f2fs_summary sum;
3447 
3448 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3449 	if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO)
3450 		f2fs_update_age_extent_cache(dn);
3451 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3452 	do_write_page(&sum, fio);
3453 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3454 
3455 	f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
3456 }
3457 
3458 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3459 {
3460 	int err;
3461 	struct f2fs_sb_info *sbi = fio->sbi;
3462 	unsigned int segno;
3463 
3464 	fio->new_blkaddr = fio->old_blkaddr;
3465 	/* i/o temperature is needed for passing down write hints */
3466 	__get_segment_type(fio);
3467 
3468 	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3469 
3470 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3471 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3472 		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3473 			  __func__, segno);
3474 		err = -EFSCORRUPTED;
3475 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
3476 		goto drop_bio;
3477 	}
3478 
3479 	if (f2fs_cp_error(sbi)) {
3480 		err = -EIO;
3481 		goto drop_bio;
3482 	}
3483 
3484 	if (fio->post_read)
3485 		invalidate_mapping_pages(META_MAPPING(sbi),
3486 				fio->new_blkaddr, fio->new_blkaddr);
3487 
3488 	stat_inc_inplace_blocks(fio->sbi);
3489 
3490 	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
3491 		err = f2fs_merge_page_bio(fio);
3492 	else
3493 		err = f2fs_submit_page_bio(fio);
3494 	if (!err) {
3495 		f2fs_update_device_state(fio->sbi, fio->ino,
3496 						fio->new_blkaddr, 1);
3497 		f2fs_update_iostat(fio->sbi, fio->page->mapping->host,
3498 						fio->io_type, F2FS_BLKSIZE);
3499 	}
3500 
3501 	return err;
3502 drop_bio:
3503 	if (fio->bio && *(fio->bio)) {
3504 		struct bio *bio = *(fio->bio);
3505 
3506 		bio->bi_status = BLK_STS_IOERR;
3507 		bio_endio(bio);
3508 		*(fio->bio) = NULL;
3509 	}
3510 	return err;
3511 }
3512 
3513 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3514 						unsigned int segno)
3515 {
3516 	int i;
3517 
3518 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3519 		if (CURSEG_I(sbi, i)->segno == segno)
3520 			break;
3521 	}
3522 	return i;
3523 }
3524 
3525 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3526 				block_t old_blkaddr, block_t new_blkaddr,
3527 				bool recover_curseg, bool recover_newaddr,
3528 				bool from_gc)
3529 {
3530 	struct sit_info *sit_i = SIT_I(sbi);
3531 	struct curseg_info *curseg;
3532 	unsigned int segno, old_cursegno;
3533 	struct seg_entry *se;
3534 	int type;
3535 	unsigned short old_blkoff;
3536 	unsigned char old_alloc_type;
3537 
3538 	segno = GET_SEGNO(sbi, new_blkaddr);
3539 	se = get_seg_entry(sbi, segno);
3540 	type = se->type;
3541 
3542 	f2fs_down_write(&SM_I(sbi)->curseg_lock);
3543 
3544 	if (!recover_curseg) {
3545 		/* for recovery flow */
3546 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3547 			if (old_blkaddr == NULL_ADDR)
3548 				type = CURSEG_COLD_DATA;
3549 			else
3550 				type = CURSEG_WARM_DATA;
3551 		}
3552 	} else {
3553 		if (IS_CURSEG(sbi, segno)) {
3554 			/* se->type is volatile due to SSR allocation */
3555 			type = __f2fs_get_curseg(sbi, segno);
3556 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3557 		} else {
3558 			type = CURSEG_WARM_DATA;
3559 		}
3560 	}
3561 
3562 	f2fs_bug_on(sbi, !IS_DATASEG(type));
3563 	curseg = CURSEG_I(sbi, type);
3564 
3565 	mutex_lock(&curseg->curseg_mutex);
3566 	down_write(&sit_i->sentry_lock);
3567 
3568 	old_cursegno = curseg->segno;
3569 	old_blkoff = curseg->next_blkoff;
3570 	old_alloc_type = curseg->alloc_type;
3571 
3572 	/* change the current segment */
3573 	if (segno != curseg->segno) {
3574 		curseg->next_segno = segno;
3575 		change_curseg(sbi, type);
3576 	}
3577 
3578 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3579 	__add_sum_entry(sbi, type, sum);
3580 
3581 	if (!recover_curseg || recover_newaddr) {
3582 		if (!from_gc)
3583 			update_segment_mtime(sbi, new_blkaddr, 0);
3584 		update_sit_entry(sbi, new_blkaddr, 1);
3585 	}
3586 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3587 		invalidate_mapping_pages(META_MAPPING(sbi),
3588 					old_blkaddr, old_blkaddr);
3589 		f2fs_invalidate_compress_page(sbi, old_blkaddr);
3590 		if (!from_gc)
3591 			update_segment_mtime(sbi, old_blkaddr, 0);
3592 		update_sit_entry(sbi, old_blkaddr, -1);
3593 	}
3594 
3595 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3596 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3597 
3598 	locate_dirty_segment(sbi, old_cursegno);
3599 
3600 	if (recover_curseg) {
3601 		if (old_cursegno != curseg->segno) {
3602 			curseg->next_segno = old_cursegno;
3603 			change_curseg(sbi, type);
3604 		}
3605 		curseg->next_blkoff = old_blkoff;
3606 		curseg->alloc_type = old_alloc_type;
3607 	}
3608 
3609 	up_write(&sit_i->sentry_lock);
3610 	mutex_unlock(&curseg->curseg_mutex);
3611 	f2fs_up_write(&SM_I(sbi)->curseg_lock);
3612 }
3613 
3614 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3615 				block_t old_addr, block_t new_addr,
3616 				unsigned char version, bool recover_curseg,
3617 				bool recover_newaddr)
3618 {
3619 	struct f2fs_summary sum;
3620 
3621 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3622 
3623 	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3624 					recover_curseg, recover_newaddr, false);
3625 
3626 	f2fs_update_data_blkaddr(dn, new_addr);
3627 }
3628 
3629 void f2fs_wait_on_page_writeback(struct page *page,
3630 				enum page_type type, bool ordered, bool locked)
3631 {
3632 	if (PageWriteback(page)) {
3633 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3634 
3635 		/* submit cached LFS IO */
3636 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3637 		/* submit cached IPU IO */
3638 		f2fs_submit_merged_ipu_write(sbi, NULL, page);
3639 		if (ordered) {
3640 			wait_on_page_writeback(page);
3641 			f2fs_bug_on(sbi, locked && PageWriteback(page));
3642 		} else {
3643 			wait_for_stable_page(page);
3644 		}
3645 	}
3646 }
3647 
3648 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3649 {
3650 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3651 	struct page *cpage;
3652 
3653 	if (!f2fs_post_read_required(inode))
3654 		return;
3655 
3656 	if (!__is_valid_data_blkaddr(blkaddr))
3657 		return;
3658 
3659 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3660 	if (cpage) {
3661 		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3662 		f2fs_put_page(cpage, 1);
3663 	}
3664 }
3665 
3666 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3667 								block_t len)
3668 {
3669 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3670 	block_t i;
3671 
3672 	if (!f2fs_post_read_required(inode))
3673 		return;
3674 
3675 	for (i = 0; i < len; i++)
3676 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
3677 
3678 	invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1);
3679 }
3680 
3681 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3682 {
3683 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3684 	struct curseg_info *seg_i;
3685 	unsigned char *kaddr;
3686 	struct page *page;
3687 	block_t start;
3688 	int i, j, offset;
3689 
3690 	start = start_sum_block(sbi);
3691 
3692 	page = f2fs_get_meta_page(sbi, start++);
3693 	if (IS_ERR(page))
3694 		return PTR_ERR(page);
3695 	kaddr = (unsigned char *)page_address(page);
3696 
3697 	/* Step 1: restore nat cache */
3698 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3699 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3700 
3701 	/* Step 2: restore sit cache */
3702 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3703 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3704 	offset = 2 * SUM_JOURNAL_SIZE;
3705 
3706 	/* Step 3: restore summary entries */
3707 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3708 		unsigned short blk_off;
3709 		unsigned int segno;
3710 
3711 		seg_i = CURSEG_I(sbi, i);
3712 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3713 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3714 		seg_i->next_segno = segno;
3715 		reset_curseg(sbi, i, 0);
3716 		seg_i->alloc_type = ckpt->alloc_type[i];
3717 		seg_i->next_blkoff = blk_off;
3718 
3719 		if (seg_i->alloc_type == SSR)
3720 			blk_off = sbi->blocks_per_seg;
3721 
3722 		for (j = 0; j < blk_off; j++) {
3723 			struct f2fs_summary *s;
3724 
3725 			s = (struct f2fs_summary *)(kaddr + offset);
3726 			seg_i->sum_blk->entries[j] = *s;
3727 			offset += SUMMARY_SIZE;
3728 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3729 						SUM_FOOTER_SIZE)
3730 				continue;
3731 
3732 			f2fs_put_page(page, 1);
3733 			page = NULL;
3734 
3735 			page = f2fs_get_meta_page(sbi, start++);
3736 			if (IS_ERR(page))
3737 				return PTR_ERR(page);
3738 			kaddr = (unsigned char *)page_address(page);
3739 			offset = 0;
3740 		}
3741 	}
3742 	f2fs_put_page(page, 1);
3743 	return 0;
3744 }
3745 
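/*
 * Restore one curseg from a full (non-compacted) summary block.  Where the
 * block lives depends on whether node summaries were persisted in the
 * checkpoint pack; node cursegs without persisted summaries are rebuilt
 * from the node blocks themselves via f2fs_restore_node_summary().
 */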
3746 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3747 {
3748 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3749 	struct f2fs_summary_block *sum;
3750 	struct curseg_info *curseg;
3751 	struct page *new;
3752 	unsigned short blk_off;
3753 	unsigned int segno = 0;
3754 	block_t blk_addr = 0;
3755 	int err = 0;
3756 
3757 	/* get segment number and block addr */
3758 	if (IS_DATASEG(type)) {
3759 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3760 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3761 							CURSEG_HOT_DATA]);
3762 		if (__exist_node_summaries(sbi))
3763 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3764 		else
3765 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3766 	} else {
3767 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
3768 							CURSEG_HOT_NODE]);
3769 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3770 							CURSEG_HOT_NODE]);
3771 		if (__exist_node_summaries(sbi))
3772 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3773 							type - CURSEG_HOT_NODE);
3774 		else
3775 			blk_addr = GET_SUM_BLOCK(sbi, segno);
3776 	}
3777 
3778 	new = f2fs_get_meta_page(sbi, blk_addr);
3779 	if (IS_ERR(new))
3780 		return PTR_ERR(new);
3781 	sum = (struct f2fs_summary_block *)page_address(new);
3782 
3783 	if (IS_NODESEG(type)) {
3784 		if (__exist_node_summaries(sbi)) {
3785 			struct f2fs_summary *ns = &sum->entries[0];
3786 			int i;
3787 
3788 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3789 				ns->version = 0;
3790 				ns->ofs_in_node = 0;
3791 			}
3792 		} else {
3793 			err = f2fs_restore_node_summary(sbi, segno, sum);
3794 			if (err)
3795 				goto out;
3796 		}
3797 	}
3798 
3799 	/* set the uncompleted segment as the curseg */
3800 	curseg = CURSEG_I(sbi, type);
3801 	mutex_lock(&curseg->curseg_mutex);
3802 
3803 	/* update journal info */
3804 	down_write(&curseg->journal_rwsem);
3805 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3806 	up_write(&curseg->journal_rwsem);
3807 
3808 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3809 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3810 	curseg->next_segno = segno;
3811 	reset_curseg(sbi, type, 0);
3812 	curseg->alloc_type = ckpt->alloc_type[type];
3813 	curseg->next_blkoff = blk_off;
3814 	mutex_unlock(&curseg->curseg_mutex);
3815 out:
3816 	f2fs_put_page(new, 1);
3817 	return err;
3818 }
3819 
3820 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3821 {
3822 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3823 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3824 	int type = CURSEG_HOT_DATA;
3825 	int err;
3826 
3827 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3828 		int npages = f2fs_npages_for_summary_flush(sbi, true);
3829 
3830 		if (npages >= 2)
3831 			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
3832 							META_CP, true);
3833 
3834 		/* restore compacted data summaries */
3835 		err = read_compacted_summaries(sbi);
3836 		if (err)
3837 			return err;
3838 		type = CURSEG_HOT_NODE;
3839 	}
3840 
3841 	if (__exist_node_summaries(sbi))
3842 		f2fs_ra_meta_pages(sbi,
3843 				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
3844 				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
3845 
3846 	for (; type <= CURSEG_COLD_NODE; type++) {
3847 		err = read_normal_summaries(sbi, type);
3848 		if (err)
3849 			return err;
3850 	}
3851 
3852 	/* sanity check for summary blocks */
3853 	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
3854 			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
3855 		f2fs_err(sbi, "invalid journal entries nats %u sits %u",
3856 			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
3857 		return -EINVAL;
3858 	}
3859 
3860 	return 0;
3861 }
3862 
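/*
 * Mirror of read_compacted_summaries(): pack the NAT journal, the SIT
 * journal and the in-use summary entries of the three data cursegs into
 * as few meta pages as possible, starting at @blkaddr.
 */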
3863 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
3864 {
3865 	struct page *page;
3866 	unsigned char *kaddr;
3867 	struct f2fs_summary *summary;
3868 	struct curseg_info *seg_i;
3869 	int written_size = 0;
3870 	int i, j;
3871 
3872 	page = f2fs_grab_meta_page(sbi, blkaddr++);
3873 	kaddr = (unsigned char *)page_address(page);
3874 	memset(kaddr, 0, PAGE_SIZE);
3875 
3876 	/* Step 1: write nat cache */
3877 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3878 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
3879 	written_size += SUM_JOURNAL_SIZE;
3880 
3881 	/* Step 2: write sit cache */
3882 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3883 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
3884 	written_size += SUM_JOURNAL_SIZE;
3885 
3886 	/* Step 3: write summary entries */
3887 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3888 		unsigned short blkoff;
3889 
3890 		seg_i = CURSEG_I(sbi, i);
3891 		if (sbi->ckpt->alloc_type[i] == SSR)
3892 			blkoff = sbi->blocks_per_seg;
3893 		else
3894 			blkoff = curseg_blkoff(sbi, i);
3895 
3896 		for (j = 0; j < blkoff; j++) {
3897 			if (!page) {
3898 				page = f2fs_grab_meta_page(sbi, blkaddr++);
3899 				kaddr = (unsigned char *)page_address(page);
3900 				memset(kaddr, 0, PAGE_SIZE);
3901 				written_size = 0;
3902 			}
3903 			summary = (struct f2fs_summary *)(kaddr + written_size);
3904 			*summary = seg_i->sum_blk->entries[j];
3905 			written_size += SUMMARY_SIZE;
3906 
3907 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
3908 							SUM_FOOTER_SIZE)
3909 				continue;
3910 
3911 			set_page_dirty(page);
3912 			f2fs_put_page(page, 1);
3913 			page = NULL;
3914 		}
3915 	}
3916 	if (page) {
3917 		set_page_dirty(page);
3918 		f2fs_put_page(page, 1);
3919 	}
3920 }
3921 
3922 static void write_normal_summaries(struct f2fs_sb_info *sbi,
3923 					block_t blkaddr, int type)
3924 {
3925 	int i, end;
3926 
3927 	if (IS_DATASEG(type))
3928 		end = type + NR_CURSEG_DATA_TYPE;
3929 	else
3930 		end = type + NR_CURSEG_NODE_TYPE;
3931 
3932 	for (i = type; i < end; i++)
3933 		write_current_sum_page(sbi, i, blkaddr + (i - type));
3934 }
3935 
3936 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3937 {
3938 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
3939 		write_compacted_summaries(sbi, start_blk);
3940 	else
3941 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
3942 }
3943 
3944 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3945 {
3946 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
3947 }
3948 
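/*
 * Look up the journal slot whose nid (NAT_JOURNAL) or segno (SIT_JOURNAL)
 * matches @val; with @alloc set, claim a fresh slot when no match exists
 * and space remains.  Returns the slot index or -1.  A typical caller,
 * as in f2fs_flush_sit_entries() below:
 *
 *	offset = f2fs_lookup_journal_in_cursum(journal, SIT_JOURNAL, segno, 1);
 *	f2fs_bug_on(sbi, offset < 0);
 *	segno_in_journal(journal, offset) = cpu_to_le32(segno);
 */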
3949 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3950 					unsigned int val, int alloc)
3951 {
3952 	int i;
3953 
3954 	if (type == NAT_JOURNAL) {
3955 		for (i = 0; i < nats_in_cursum(journal); i++) {
3956 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
3957 				return i;
3958 		}
3959 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
3960 			return update_nats_in_cursum(journal, 1);
3961 	} else if (type == SIT_JOURNAL) {
3962 		for (i = 0; i < sits_in_cursum(journal); i++)
3963 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
3964 				return i;
3965 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
3966 			return update_sits_in_cursum(journal, 1);
3967 	}
3968 	return -1;
3969 }
3970 
3971 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
3972 					unsigned int segno)
3973 {
3974 	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
3975 }
3976 
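/*
 * Grab the copy of the SIT block covering @start in the alternate SIT
 * pack, fill it from the in-memory segment entries, and flip the bitmap
 * so later lookups hit the new copy.  Counterpart of
 * get_current_sit_page().
 */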
3977 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
3978 					unsigned int start)
3979 {
3980 	struct sit_info *sit_i = SIT_I(sbi);
3981 	struct page *page;
3982 	pgoff_t src_off, dst_off;
3983 
3984 	src_off = current_sit_addr(sbi, start);
3985 	dst_off = next_sit_addr(sbi, src_off);
3986 
3987 	page = f2fs_grab_meta_page(sbi, dst_off);
3988 	seg_info_to_sit_page(sbi, page, start);
3989 
3990 	set_page_dirty(page);
3991 	set_to_next_sit(sit_i, start);
3992 
3993 	return page;
3994 }
3995 
3996 static struct sit_entry_set *grab_sit_entry_set(void)
3997 {
3998 	struct sit_entry_set *ses =
3999 			f2fs_kmem_cache_alloc(sit_entry_set_slab,
4000 						GFP_NOFS, true, NULL);
4001 
4002 	ses->entry_cnt = 0;
4003 	INIT_LIST_HEAD(&ses->set_list);
4004 	return ses;
4005 }
4006 
4007 static void release_sit_entry_set(struct sit_entry_set *ses)
4008 {
4009 	list_del(&ses->set_list);
4010 	kmem_cache_free(sit_entry_set_slab, ses);
4011 }
4012 
4013 static void adjust_sit_entry_set(struct sit_entry_set *ses,
4014 						struct list_head *head)
4015 {
4016 	struct sit_entry_set *next = ses;
4017 
4018 	if (list_is_last(&ses->set_list, head))
4019 		return;
4020 
4021 	list_for_each_entry_continue(next, head, set_list)
4022 		if (ses->entry_cnt <= next->entry_cnt) {
4023 			list_move_tail(&ses->set_list, &next->set_list);
4024 			return;
4025 		}
4026 
4027 	list_move_tail(&ses->set_list, head);
4028 }
4029 
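/*
 * Account one dirty segment in the entry set that covers its SIT block,
 * allocating the set on first use.  The list is kept sorted in ascending
 * entry_cnt order, so the flush path journals the small sets first and
 * falls back to SIT pages once a set no longer fits.
 */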
4030 static void add_sit_entry(unsigned int segno, struct list_head *head)
4031 {
4032 	struct sit_entry_set *ses;
4033 	unsigned int start_segno = START_SEGNO(segno);
4034 
4035 	list_for_each_entry(ses, head, set_list) {
4036 		if (ses->start_segno == start_segno) {
4037 			ses->entry_cnt++;
4038 			adjust_sit_entry_set(ses, head);
4039 			return;
4040 		}
4041 	}
4042 
4043 	ses = grab_sit_entry_set();
4044 
4045 	ses->start_segno = start_segno;
4046 	ses->entry_cnt++;
4047 	list_add(&ses->set_list, head);
4048 }
4049 
4050 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4051 {
4052 	struct f2fs_sm_info *sm_info = SM_I(sbi);
4053 	struct list_head *set_list = &sm_info->sit_entry_set;
4054 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4055 	unsigned int segno;
4056 
4057 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4058 		add_sit_entry(segno, set_list);
4059 }
4060 
4061 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4062 {
4063 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4064 	struct f2fs_journal *journal = curseg->journal;
4065 	int i;
4066 
4067 	down_write(&curseg->journal_rwsem);
4068 	for (i = 0; i < sits_in_cursum(journal); i++) {
4069 		unsigned int segno;
4070 		bool dirtied;
4071 
4072 		segno = le32_to_cpu(segno_in_journal(journal, i));
4073 		dirtied = __mark_sit_entry_dirty(sbi, segno);
4074 
4075 		if (!dirtied)
4076 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4077 	}
4078 	update_sits_in_cursum(journal, -i);
4079 	up_write(&curseg->journal_rwsem);
4080 }
4081 
4082 /*
4083  * CP calls this function, which flushes SIT entries including sit_journal,
4084  * and moves prefree segs to free segs.
4085  */
4086 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4087 {
4088 	struct sit_info *sit_i = SIT_I(sbi);
4089 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4090 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4091 	struct f2fs_journal *journal = curseg->journal;
4092 	struct sit_entry_set *ses, *tmp;
4093 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
4094 	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4095 	struct seg_entry *se;
4096 
4097 	down_write(&sit_i->sentry_lock);
4098 
4099 	if (!sit_i->dirty_sentries)
4100 		goto out;
4101 
4102 	/*
4103 	 * temporarily add the sit entries marked in the dirty bitmap to
4104 	 * sit entry sets and account them
4105 	 */
4106 	add_sits_in_set(sbi);
4107 
4108 	/*
4109 	 * if there is not enough space in the journal to store the dirty
4110 	 * sit entries, remove all entries from the journal and add and
4111 	 * account them in the sit entry sets.
4112 	 */
4113 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4114 								!to_journal)
4115 		remove_sits_in_journal(sbi);
4116 
4117 	/*
4118 	 * there are two steps to flush sit entries:
4119 	 * #1, flush sit entries to journal in current cold data summary block.
4120 	 * #2, flush sit entries to sit page.
4121 	 */
4122 	list_for_each_entry_safe(ses, tmp, head, set_list) {
4123 		struct page *page = NULL;
4124 		struct f2fs_sit_block *raw_sit = NULL;
4125 		unsigned int start_segno = ses->start_segno;
4126 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4127 						(unsigned long)MAIN_SEGS(sbi));
4128 		unsigned int segno = start_segno;
4129 
4130 		if (to_journal &&
4131 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4132 			to_journal = false;
4133 
4134 		if (to_journal) {
4135 			down_write(&curseg->journal_rwsem);
4136 		} else {
4137 			page = get_next_sit_page(sbi, start_segno);
4138 			raw_sit = page_address(page);
4139 		}
4140 
4141 		/* flush dirty sit entries in region of current sit set */
4142 		for_each_set_bit_from(segno, bitmap, end) {
4143 			int offset, sit_offset;
4144 
4145 			se = get_seg_entry(sbi, segno);
4146 #ifdef CONFIG_F2FS_CHECK_FS
4147 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4148 						SIT_VBLOCK_MAP_SIZE))
4149 				f2fs_bug_on(sbi, 1);
4150 #endif
4151 
4152 			/* add discard candidates */
4153 			if (!(cpc->reason & CP_DISCARD)) {
4154 				cpc->trim_start = segno;
4155 				add_discard_addrs(sbi, cpc, false);
4156 			}
4157 
4158 			if (to_journal) {
4159 				offset = f2fs_lookup_journal_in_cursum(journal,
4160 							SIT_JOURNAL, segno, 1);
4161 				f2fs_bug_on(sbi, offset < 0);
4162 				segno_in_journal(journal, offset) =
4163 							cpu_to_le32(segno);
4164 				seg_info_to_raw_sit(se,
4165 					&sit_in_journal(journal, offset));
4166 				check_block_count(sbi, segno,
4167 					&sit_in_journal(journal, offset));
4168 			} else {
4169 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4170 				seg_info_to_raw_sit(se,
4171 						&raw_sit->entries[sit_offset]);
4172 				check_block_count(sbi, segno,
4173 						&raw_sit->entries[sit_offset]);
4174 			}
4175 
4176 			__clear_bit(segno, bitmap);
4177 			sit_i->dirty_sentries--;
4178 			ses->entry_cnt--;
4179 		}
4180 
4181 		if (to_journal)
4182 			up_write(&curseg->journal_rwsem);
4183 		else
4184 			f2fs_put_page(page, 1);
4185 
4186 		f2fs_bug_on(sbi, ses->entry_cnt);
4187 		release_sit_entry_set(ses);
4188 	}
4189 
4190 	f2fs_bug_on(sbi, !list_empty(head));
4191 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
4192 out:
4193 	if (cpc->reason & CP_DISCARD) {
4194 		__u64 trim_start = cpc->trim_start;
4195 
4196 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4197 			add_discard_addrs(sbi, cpc, false);
4198 
4199 		cpc->trim_start = trim_start;
4200 	}
4201 	up_write(&sit_i->sentry_lock);
4202 
4203 	set_prefree_as_free_segments(sbi);
4204 }
4205 
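/*
 * Build the in-memory SIT: one seg_entry per main segment, the per-segment
 * validity bitmaps carved out of one big allocation, optional per-section
 * entries for large sections, and a private copy of the checkpoint's SIT
 * version bitmap.
 */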
4206 static int build_sit_info(struct f2fs_sb_info *sbi)
4207 {
4208 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4209 	struct sit_info *sit_i;
4210 	unsigned int sit_segs, start;
4211 	char *src_bitmap, *bitmap;
4212 	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4213 	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4214 
4215 	/* allocate memory for SIT information */
4216 	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4217 	if (!sit_i)
4218 		return -ENOMEM;
4219 
4220 	SM_I(sbi)->sit_info = sit_i;
4221 
4222 	sit_i->sentries =
4223 		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4224 					      MAIN_SEGS(sbi)),
4225 			      GFP_KERNEL);
4226 	if (!sit_i->sentries)
4227 		return -ENOMEM;
4228 
4229 	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4230 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4231 								GFP_KERNEL);
4232 	if (!sit_i->dirty_sentries_bitmap)
4233 		return -ENOMEM;
4234 
4235 #ifdef CONFIG_F2FS_CHECK_FS
4236 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4237 #else
4238 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4239 #endif
4240 	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4241 	if (!sit_i->bitmap)
4242 		return -ENOMEM;
4243 
4244 	bitmap = sit_i->bitmap;
4245 
4246 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4247 		sit_i->sentries[start].cur_valid_map = bitmap;
4248 		bitmap += SIT_VBLOCK_MAP_SIZE;
4249 
4250 		sit_i->sentries[start].ckpt_valid_map = bitmap;
4251 		bitmap += SIT_VBLOCK_MAP_SIZE;
4252 
4253 #ifdef CONFIG_F2FS_CHECK_FS
4254 		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4255 		bitmap += SIT_VBLOCK_MAP_SIZE;
4256 #endif
4257 
4258 		if (discard_map) {
4259 			sit_i->sentries[start].discard_map = bitmap;
4260 			bitmap += SIT_VBLOCK_MAP_SIZE;
4261 		}
4262 	}
4263 
4264 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4265 	if (!sit_i->tmp_map)
4266 		return -ENOMEM;
4267 
4268 	if (__is_large_section(sbi)) {
4269 		sit_i->sec_entries =
4270 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4271 						      MAIN_SECS(sbi)),
4272 				      GFP_KERNEL);
4273 		if (!sit_i->sec_entries)
4274 			return -ENOMEM;
4275 	}
4276 
4277 	/* get information related to SIT */
4278 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4279 
4280 	/* set up SIT bitmap from the checkpoint pack */
4281 	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4282 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4283 
4284 	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4285 	if (!sit_i->sit_bitmap)
4286 		return -ENOMEM;
4287 
4288 #ifdef CONFIG_F2FS_CHECK_FS
4289 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4290 					sit_bitmap_size, GFP_KERNEL);
4291 	if (!sit_i->sit_bitmap_mir)
4292 		return -ENOMEM;
4293 
4294 	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4295 					main_bitmap_size, GFP_KERNEL);
4296 	if (!sit_i->invalid_segmap)
4297 		return -ENOMEM;
4298 #endif
4299 
4300 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4301 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4302 	sit_i->written_valid_blocks = 0;
4303 	sit_i->bitmap_size = sit_bitmap_size;
4304 	sit_i->dirty_sentries = 0;
4305 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4306 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4307 	sit_i->mounted_time = ktime_get_boottime_seconds();
4308 	init_rwsem(&sit_i->sentry_lock);
4309 	return 0;
4310 }
4311 
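/*
 * Allocate the free segment/section bitmaps.  Everything starts out
 * marked in-use (0xff); init_free_segmap() later clears the bits of
 * segments the SIT shows to be empty.
 */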
4312 static int build_free_segmap(struct f2fs_sb_info *sbi)
4313 {
4314 	struct free_segmap_info *free_i;
4315 	unsigned int bitmap_size, sec_bitmap_size;
4316 
4317 	/* allocate memory for free segmap information */
4318 	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4319 	if (!free_i)
4320 		return -ENOMEM;
4321 
4322 	SM_I(sbi)->free_info = free_i;
4323 
4324 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4325 	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4326 	if (!free_i->free_segmap)
4327 		return -ENOMEM;
4328 
4329 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4330 	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4331 	if (!free_i->free_secmap)
4332 		return -ENOMEM;
4333 
4334 	/* set all segments as dirty temporarily */
4335 	memset(free_i->free_segmap, 0xff, bitmap_size);
4336 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4337 
4338 	/* init free segmap information */
4339 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4340 	free_i->free_segments = 0;
4341 	free_i->free_sections = 0;
4342 	spin_lock_init(&free_i->segmap_lock);
4343 	return 0;
4344 }
4345 
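/*
 * Allocate all cursegs (the persistent logs plus the in-memory pinned and
 * ATGC logs) and reload their summaries and journals from the checkpoint
 * via restore_curseg_summaries().
 */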
4346 static int build_curseg(struct f2fs_sb_info *sbi)
4347 {
4348 	struct curseg_info *array;
4349 	int i;
4350 
4351 	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4352 					sizeof(*array)), GFP_KERNEL);
4353 	if (!array)
4354 		return -ENOMEM;
4355 
4356 	SM_I(sbi)->curseg_array = array;
4357 
4358 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4359 		mutex_init(&array[i].curseg_mutex);
4360 		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4361 		if (!array[i].sum_blk)
4362 			return -ENOMEM;
4363 		init_rwsem(&array[i].journal_rwsem);
4364 		array[i].journal = f2fs_kzalloc(sbi,
4365 				sizeof(struct f2fs_journal), GFP_KERNEL);
4366 		if (!array[i].journal)
4367 			return -ENOMEM;
4368 		if (i < NR_PERSISTENT_LOG)
4369 			array[i].seg_type = CURSEG_HOT_DATA + i;
4370 		else if (i == CURSEG_COLD_DATA_PINNED)
4371 			array[i].seg_type = CURSEG_COLD_DATA;
4372 		else if (i == CURSEG_ALL_DATA_ATGC)
4373 			array[i].seg_type = CURSEG_COLD_DATA;
4374 		array[i].segno = NULL_SEGNO;
4375 		array[i].next_blkoff = 0;
4376 		array[i].inited = false;
4377 	}
4378 	return restore_curseg_summaries(sbi);
4379 }
4380 
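/*
 * Read every SIT entry from disk (with readahead in BIO_MAX_VECS chunks),
 * then overlay the newer copies held in the SIT journal, rebuilding
 * discard maps and per-section counters on the way.  Finally cross-check
 * the accumulated valid-block counts against the checkpoint's totals.
 */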
4381 static int build_sit_entries(struct f2fs_sb_info *sbi)
4382 {
4383 	struct sit_info *sit_i = SIT_I(sbi);
4384 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4385 	struct f2fs_journal *journal = curseg->journal;
4386 	struct seg_entry *se;
4387 	struct f2fs_sit_entry sit;
4388 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4389 	unsigned int i, start, end;
4390 	unsigned int readed, start_blk = 0;
4391 	int err = 0;
4392 	block_t sit_valid_blocks[2] = {0, 0};
4393 
4394 	do {
4395 		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4396 							META_SIT, true);
4397 
4398 		start = start_blk * sit_i->sents_per_block;
4399 		end = (start_blk + readed) * sit_i->sents_per_block;
4400 
4401 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
4402 			struct f2fs_sit_block *sit_blk;
4403 			struct page *page;
4404 
4405 			se = &sit_i->sentries[start];
4406 			page = get_current_sit_page(sbi, start);
4407 			if (IS_ERR(page))
4408 				return PTR_ERR(page);
4409 			sit_blk = (struct f2fs_sit_block *)page_address(page);
4410 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4411 			f2fs_put_page(page, 1);
4412 
4413 			err = check_block_count(sbi, start, &sit);
4414 			if (err)
4415 				return err;
4416 			seg_info_from_raw_sit(se, &sit);
4417 
4418 			if (se->type >= NR_PERSISTENT_LOG) {
4419 				f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4420 							se->type, start);
4421 				f2fs_handle_error(sbi,
4422 						ERROR_INCONSISTENT_SUM_TYPE);
4423 				return -EFSCORRUPTED;
4424 			}
4425 
4426 			sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4427 
4428 			if (f2fs_block_unit_discard(sbi)) {
4429 				/* build discard map only one time */
4430 				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4431 					memset(se->discard_map, 0xff,
4432 						SIT_VBLOCK_MAP_SIZE);
4433 				} else {
4434 					memcpy(se->discard_map,
4435 						se->cur_valid_map,
4436 						SIT_VBLOCK_MAP_SIZE);
4437 					sbi->discard_blks +=
4438 						sbi->blocks_per_seg -
4439 						se->valid_blocks;
4440 				}
4441 			}
4442 
4443 			if (__is_large_section(sbi))
4444 				get_sec_entry(sbi, start)->valid_blocks +=
4445 							se->valid_blocks;
4446 		}
4447 		start_blk += readed;
4448 	} while (start_blk < sit_blk_cnt);
4449 
4450 	down_read(&curseg->journal_rwsem);
4451 	for (i = 0; i < sits_in_cursum(journal); i++) {
4452 		unsigned int old_valid_blocks;
4453 
4454 		start = le32_to_cpu(segno_in_journal(journal, i));
4455 		if (start >= MAIN_SEGS(sbi)) {
4456 			f2fs_err(sbi, "Wrong journal entry on segno %u",
4457 				 start);
4458 			err = -EFSCORRUPTED;
4459 			f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
4460 			break;
4461 		}
4462 
4463 		se = &sit_i->sentries[start];
4464 		sit = sit_in_journal(journal, i);
4465 
4466 		old_valid_blocks = se->valid_blocks;
4467 
4468 		sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
4469 
4470 		err = check_block_count(sbi, start, &sit);
4471 		if (err)
4472 			break;
4473 		seg_info_from_raw_sit(se, &sit);
4474 
4475 		if (se->type >= NR_PERSISTENT_LOG) {
4476 			f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4477 							se->type, start);
4478 			err = -EFSCORRUPTED;
4479 			f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
4480 			break;
4481 		}
4482 
4483 		sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4484 
4485 		if (f2fs_block_unit_discard(sbi)) {
4486 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4487 				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4488 			} else {
4489 				memcpy(se->discard_map, se->cur_valid_map,
4490 							SIT_VBLOCK_MAP_SIZE);
4491 				sbi->discard_blks += old_valid_blocks;
4492 				sbi->discard_blks -= se->valid_blocks;
4493 			}
4494 		}
4495 
4496 		if (__is_large_section(sbi)) {
4497 			get_sec_entry(sbi, start)->valid_blocks +=
4498 							se->valid_blocks;
4499 			get_sec_entry(sbi, start)->valid_blocks -=
4500 							old_valid_blocks;
4501 		}
4502 	}
4503 	up_read(&curseg->journal_rwsem);
4504 
4505 	if (err)
4506 		return err;
4507 
4508 	if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
4509 		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4510 			 sit_valid_blocks[NODE], valid_node_count(sbi));
4511 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
4512 		return -EFSCORRUPTED;
4513 	}
4514 
4515 	if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
4516 				valid_user_blocks(sbi)) {
4517 		f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
4518 			 sit_valid_blocks[DATA], sit_valid_blocks[NODE],
4519 			 valid_user_blocks(sbi));
4520 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
4521 		return -EFSCORRUPTED;
4522 	}
4523 
4524 	return 0;
4525 }
4526 
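/*
 * Clear the free bits of segments the SIT shows empty, accumulate
 * written_valid_blocks for the rest, and mark the current segments as
 * in use.
 */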
4527 static void init_free_segmap(struct f2fs_sb_info *sbi)
4528 {
4529 	unsigned int start;
4530 	int type;
4531 	struct seg_entry *sentry;
4532 
4533 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4534 		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4535 			continue;
4536 		sentry = get_seg_entry(sbi, start);
4537 		if (!sentry->valid_blocks)
4538 			__set_free(sbi, start);
4539 		else
4540 			SIT_I(sbi)->written_valid_blocks +=
4541 						sentry->valid_blocks;
4542 	}
4543 
4544 	/* mark the current segments as in use */
4545 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4546 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4547 
4548 		__set_test_and_inuse(sbi, curseg_t->segno);
4549 	}
4550 }
4551 
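/*
 * Mark every partially valid segment dirty; for large sections, also
 * record partially valid, non-current sections in dirty_secmap.
 */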
4552 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4553 {
4554 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4555 	struct free_segmap_info *free_i = FREE_I(sbi);
4556 	unsigned int segno = 0, offset = 0, secno;
4557 	block_t valid_blocks, usable_blks_in_seg;
4558 
4559 	while (1) {
4560 		/* find dirty segment based on free segmap */
4561 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4562 		if (segno >= MAIN_SEGS(sbi))
4563 			break;
4564 		offset = segno + 1;
4565 		valid_blocks = get_valid_blocks(sbi, segno, false);
4566 		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4567 		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4568 			continue;
4569 		if (valid_blocks > usable_blks_in_seg) {
4570 			f2fs_bug_on(sbi, 1);
4571 			continue;
4572 		}
4573 		mutex_lock(&dirty_i->seglist_lock);
4574 		__locate_dirty_segment(sbi, segno, DIRTY);
4575 		mutex_unlock(&dirty_i->seglist_lock);
4576 	}
4577 
4578 	if (!__is_large_section(sbi))
4579 		return;
4580 
4581 	mutex_lock(&dirty_i->seglist_lock);
4582 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4583 		valid_blocks = get_valid_blocks(sbi, segno, true);
4584 		secno = GET_SEC_FROM_SEG(sbi, segno);
4585 
4586 		if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
4587 			continue;
4588 		if (IS_CURSEC(sbi, secno))
4589 			continue;
4590 		set_bit(secno, dirty_i->dirty_secmap);
4591 	}
4592 	mutex_unlock(&dirty_i->seglist_lock);
4593 }
4594 
4595 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4596 {
4597 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4598 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4599 
4600 	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4601 	if (!dirty_i->victim_secmap)
4602 		return -ENOMEM;
4603 
4604 	dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4605 	if (!dirty_i->pinned_secmap)
4606 		return -ENOMEM;
4607 
4608 	dirty_i->pinned_secmap_cnt = 0;
4609 	dirty_i->enable_pin_section = true;
4610 	return 0;
4611 }
4612 
4613 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4614 {
4615 	struct dirty_seglist_info *dirty_i;
4616 	unsigned int bitmap_size, i;
4617 
4618 	/* allocate memory for dirty segments list information */
4619 	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4620 								GFP_KERNEL);
4621 	if (!dirty_i)
4622 		return -ENOMEM;
4623 
4624 	SM_I(sbi)->dirty_info = dirty_i;
4625 	mutex_init(&dirty_i->seglist_lock);
4626 
4627 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4628 
4629 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
4630 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4631 								GFP_KERNEL);
4632 		if (!dirty_i->dirty_segmap[i])
4633 			return -ENOMEM;
4634 	}
4635 
4636 	if (__is_large_section(sbi)) {
4637 		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4638 		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4639 						bitmap_size, GFP_KERNEL);
4640 		if (!dirty_i->dirty_secmap)
4641 			return -ENOMEM;
4642 	}
4643 
4644 	init_dirty_segmap(sbi);
4645 	return init_victim_secmap(sbi);
4646 }
4647 
4648 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4649 {
4650 	int i;
4651 
4652 	/*
4653 	 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4654 	 * In LFS curseg, all blkaddr after .next_blkoff should be unused.
4655 	 */
4656 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4657 		struct curseg_info *curseg = CURSEG_I(sbi, i);
4658 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4659 		unsigned int blkofs = curseg->next_blkoff;
4660 
4661 		if (f2fs_sb_has_readonly(sbi) &&
4662 			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
4663 			continue;
4664 
4665 		sanity_check_seg_type(sbi, curseg->seg_type);
4666 
4667 		if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
4668 			f2fs_err(sbi,
4669 				 "Current segment has invalid alloc_type:%d",
4670 				 curseg->alloc_type);
4671 			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4672 			return -EFSCORRUPTED;
4673 		}
4674 
4675 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
4676 			goto out;
4677 
4678 		if (curseg->alloc_type == SSR)
4679 			continue;
4680 
4681 		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4682 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4683 				continue;
4684 out:
4685 			f2fs_err(sbi,
4686 				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4687 				 i, curseg->segno, curseg->alloc_type,
4688 				 curseg->next_blkoff, blkofs);
4689 			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4690 			return -EFSCORRUPTED;
4691 		}
4692 	}
4693 	return 0;
4694 }
4695 
4696 #ifdef CONFIG_BLK_DEV_ZONED
4697 
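/*
 * Compare a sequential zone's write pointer with the last block the SIT
 * claims valid inside that zone.  A write pointer behind valid data is
 * only reported; a non-zero write pointer in a zone holding no valid
 * blocks is repaired by resetting the zone.
 */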
4698 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4699 				    struct f2fs_dev_info *fdev,
4700 				    struct blk_zone *zone)
4701 {
4702 	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4703 	block_t zone_block, wp_block, last_valid_block;
4704 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4705 	int i, s, b, ret;
4706 	struct seg_entry *se;
4707 
4708 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4709 		return 0;
4710 
4711 	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4712 	wp_segno = GET_SEGNO(sbi, wp_block);
4713 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4714 	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4715 	zone_segno = GET_SEGNO(sbi, zone_block);
4716 	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4717 
4718 	if (zone_segno >= MAIN_SEGS(sbi))
4719 		return 0;
4720 
4721 	/*
4722 	 * Skip the check for zones that cursegs point to, since
4723 	 * fix_curseg_write_pointer() checks them.
4724 	 */
4725 	for (i = 0; i < NO_CHECK_TYPE; i++)
4726 		if (zone_secno == GET_SEC_FROM_SEG(sbi,
4727 						   CURSEG_I(sbi, i)->segno))
4728 			return 0;
4729 
4730 	/*
4731 	 * Get last valid block of the zone.
4732 	 */
4733 	last_valid_block = zone_block - 1;
4734 	for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4735 		segno = zone_segno + s;
4736 		se = get_seg_entry(sbi, segno);
4737 		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4738 			if (f2fs_test_bit(b, se->cur_valid_map)) {
4739 				last_valid_block = START_BLOCK(sbi, segno) + b;
4740 				break;
4741 			}
4742 		if (last_valid_block >= zone_block)
4743 			break;
4744 	}
4745 
4746 	/*
4747 	 * If the last valid block is beyond the write pointer, report the
4748 	 * inconsistency. It does not cause a write error because the zone
4749 	 * will not be selected for writes until it gets discarded, so just
4750 	 * report it.
4751 	 */
4752 	if (last_valid_block >= wp_block) {
4753 		f2fs_notice(sbi, "Valid block beyond write pointer: "
4754 			    "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
4755 			    GET_SEGNO(sbi, last_valid_block),
4756 			    GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4757 			    wp_segno, wp_blkoff);
4758 		return 0;
4759 	}
4760 
4761 	/*
4762 	 * If there is no valid block in the zone and the write pointer is
4763 	 * not at the zone start, reset the write pointer.
4764 	 */
4765 	if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
4766 		f2fs_notice(sbi,
4767 			    "Zone without valid block has non-zero write "
4768 			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4769 			    wp_segno, wp_blkoff);
4770 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4771 					zone->len >> log_sectors_per_block);
4772 		if (ret) {
4773 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4774 				 fdev->path, ret);
4775 			return ret;
4776 		}
4777 	}
4778 
4779 	return 0;
4780 }
4781 
4782 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4783 						  block_t zone_blkaddr)
4784 {
4785 	int i;
4786 
4787 	for (i = 0; i < sbi->s_ndevs; i++) {
4788 		if (!bdev_is_zoned(FDEV(i).bdev))
4789 			continue;
4790 		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4791 				zone_blkaddr <= FDEV(i).end_blk))
4792 			return &FDEV(i);
4793 	}
4794 
4795 	return NULL;
4796 }
4797 
4798 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4799 			      void *data)
4800 {
4801 	memcpy(data, zone, sizeof(struct blk_zone));
4802 	return 0;
4803 }
4804 
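/*
 * Make sure curseg @type sits exactly on its zone's write pointer.  If
 * they disagree (e.g. after an unclean shutdown on a zoned device), move
 * the curseg to a fresh section and reset the new zone if it is not
 * empty.
 */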
4805 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4806 {
4807 	struct curseg_info *cs = CURSEG_I(sbi, type);
4808 	struct f2fs_dev_info *zbd;
4809 	struct blk_zone zone;
4810 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4811 	block_t cs_zone_block, wp_block;
4812 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4813 	sector_t zone_sector;
4814 	int err;
4815 
4816 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4817 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4818 
4819 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4820 	if (!zbd)
4821 		return 0;
4822 
4823 	/* report zone for the sector the curseg points to */
4824 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4825 		<< log_sectors_per_block;
4826 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4827 				  report_one_zone_cb, &zone);
4828 	if (err != 1) {
4829 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4830 			 zbd->path, err);
4831 		return err;
4832 	}
4833 
4834 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4835 		return 0;
4836 
4837 	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4838 	wp_segno = GET_SEGNO(sbi, wp_block);
4839 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4840 	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4841 
4842 	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4843 		wp_sector_off == 0)
4844 		return 0;
4845 
4846 	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
4847 		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
4848 		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
4849 
4850 	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
4851 		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
4852 
4853 	f2fs_allocate_new_section(sbi, type, true);
4854 
4855 	/* check consistency of the zone the curseg pointed to */
4856 	if (check_zone_write_pointer(sbi, zbd, &zone))
4857 		return -EIO;
4858 
4859 	/* check newly assigned zone */
4860 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4861 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4862 
4863 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4864 	if (!zbd)
4865 		return 0;
4866 
4867 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4868 		<< log_sectors_per_block;
4869 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4870 				  report_one_zone_cb, &zone);
4871 	if (err != 1) {
4872 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4873 			 zbd->path, err);
4874 		return err;
4875 	}
4876 
4877 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4878 		return 0;
4879 
4880 	if (zone.wp != zone.start) {
4881 		f2fs_notice(sbi,
4882 			    "New zone for curseg[%d] is not yet discarded. "
4883 			    "Reset the zone: curseg[0x%x,0x%x]",
4884 			    type, cs->segno, cs->next_blkoff);
4885 		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
4886 				zone_sector >> log_sectors_per_block,
4887 				zone.len >> log_sectors_per_block);
4888 		if (err) {
4889 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4890 				 zbd->path, err);
4891 			return err;
4892 		}
4893 	}
4894 
4895 	return 0;
4896 }
4897 
4898 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4899 {
4900 	int i, ret;
4901 
4902 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4903 		ret = fix_curseg_write_pointer(sbi, i);
4904 		if (ret)
4905 			return ret;
4906 	}
4907 
4908 	return 0;
4909 }
4910 
4911 struct check_zone_write_pointer_args {
4912 	struct f2fs_sb_info *sbi;
4913 	struct f2fs_dev_info *fdev;
4914 };
4915 
4916 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
4917 				      void *data)
4918 {
4919 	struct check_zone_write_pointer_args *args;
4920 
4921 	args = (struct check_zone_write_pointer_args *)data;
4922 
4923 	return check_zone_write_pointer(args->sbi, args->fdev, zone);
4924 }
4925 
4926 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4927 {
4928 	int i, ret;
4929 	struct check_zone_write_pointer_args args;
4930 
4931 	for (i = 0; i < sbi->s_ndevs; i++) {
4932 		if (!bdev_is_zoned(FDEV(i).bdev))
4933 			continue;
4934 
4935 		args.sbi = sbi;
4936 		args.fdev = &FDEV(i);
4937 		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
4938 					  check_zone_write_pointer_cb, &args);
4939 		if (ret < 0)
4940 			return ret;
4941 	}
4942 
4943 	return 0;
4944 }
4945 
4946 static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
4947 						unsigned int dev_idx)
4948 {
4949 	if (!bdev_is_zoned(FDEV(dev_idx).bdev))
4950 		return true;
4951 	return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
4952 }
4953 
4954 /* Return the zone index in the given device */
4955 static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
4956 					int dev_idx)
4957 {
4958 	block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
4959 
4960 	return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
4961 						sbi->log_blocks_per_blkz;
4962 }
4963 
4964 /*
4965  * Return the number of usable segments in a section, based on the
4966  * corresponding zone's capacity. A zone is equal to a section.
4967  */
4968 static inline unsigned int f2fs_usable_zone_segs_in_sec(
4969 		struct f2fs_sb_info *sbi, unsigned int segno)
4970 {
4971 	unsigned int dev_idx, zone_idx;
4972 
4973 	dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
4974 	zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
4975 
4976 	/* Conventional zone's capacity is always equal to zone size */
4977 	if (is_conv_zone(sbi, zone_idx, dev_idx))
4978 		return sbi->segs_per_sec;
4979 
4980 	if (!sbi->unusable_blocks_per_sec)
4981 		return sbi->segs_per_sec;
4982 
4983 	/* Subtract the segments that lie beyond the zone capacity */
4984 	return sbi->segs_per_sec - (sbi->unusable_blocks_per_sec >>
4985 						sbi->log_blocks_per_seg);
4986 }
4987 
4988 /*
4989  * Return the number of usable blocks in a segment. The number of blocks
4990  * returned is always equal to the number of blocks in a segment for
4991  * segments fully contained within a sequential zone capacity or a
4992  * conventional zone. For segments partially contained in a sequential
4993  * zone capacity, the number of usable blocks up to the zone capacity
4994  * is returned. 0 is returned in all other cases.
4995  */
4996 static inline unsigned int f2fs_usable_zone_blks_in_seg(
4997 			struct f2fs_sb_info *sbi, unsigned int segno)
4998 {
4999 	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
5000 	unsigned int zone_idx, dev_idx, secno;
5001 
5002 	secno = GET_SEC_FROM_SEG(sbi, segno);
5003 	seg_start = START_BLOCK(sbi, segno);
5004 	dev_idx = f2fs_target_device_index(sbi, seg_start);
5005 	zone_idx = get_zone_idx(sbi, secno, dev_idx);
5006 
5007 	/*
5008 	 * A conventional zone's capacity is always equal to the zone size,
5009 	 * so the number of blocks per segment is unchanged.
5010 	 */
5011 	if (is_conv_zone(sbi, zone_idx, dev_idx))
5012 		return sbi->blocks_per_seg;
5013 
5014 	if (!sbi->unusable_blocks_per_sec)
5015 		return sbi->blocks_per_seg;
5016 
5017 	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5018 	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
5019 
5020 	/*
5021 	 * If segment starts before zone capacity and spans beyond
5022 	 * zone capacity, then usable blocks are from seg start to
5023 	 * zone capacity. If the segment starts after the zone capacity,
5024 	 * then there are no usable blocks.
5025 	 */
5026 	if (seg_start >= sec_cap_blkaddr)
5027 		return 0;
5028 	if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
5029 		return sec_cap_blkaddr - seg_start;
5030 
5031 	return sbi->blocks_per_seg;
5032 }
5033 #else
5034 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5035 {
5036 	return 0;
5037 }
5038 
5039 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5040 {
5041 	return 0;
5042 }
5043 
5044 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5045 							unsigned int segno)
5046 {
5047 	return 0;
5048 }
5049 
5050 static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
5051 							unsigned int segno)
5052 {
5053 	return 0;
5054 }
5055 #endif
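/*
 * On zoned devices a segment may contain fewer usable blocks than
 * blocks_per_seg when the zone capacity is smaller than the zone size;
 * everywhere else the full segment is usable.
 */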
5056 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5057 					unsigned int segno)
5058 {
5059 	if (f2fs_sb_has_blkzoned(sbi))
5060 		return f2fs_usable_zone_blks_in_seg(sbi, segno);
5061 
5062 	return sbi->blocks_per_seg;
5063 }
5064 
5065 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5066 					unsigned int segno)
5067 {
5068 	if (f2fs_sb_has_blkzoned(sbi))
5069 		return f2fs_usable_zone_segs_in_sec(sbi, segno);
5070 
5071 	return sbi->segs_per_sec;
5072 }
5073 
5074 /*
5075  * Update min, max modified time for cost-benefit GC algorithm
5076  */
5077 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5078 {
5079 	struct sit_info *sit_i = SIT_I(sbi);
5080 	unsigned int segno;
5081 
5082 	down_write(&sit_i->sentry_lock);
5083 
5084 	sit_i->min_mtime = ULLONG_MAX;
5085 
5086 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
5087 		unsigned int i;
5088 		unsigned long long mtime = 0;
5089 
5090 		for (i = 0; i < sbi->segs_per_sec; i++)
5091 			mtime += get_seg_entry(sbi, segno + i)->mtime;
5092 
5093 		mtime = div_u64(mtime, sbi->segs_per_sec);
5094 
5095 		if (sit_i->min_mtime > mtime)
5096 			sit_i->min_mtime = mtime;
5097 	}
5098 	sit_i->max_mtime = get_mtime(sbi, false);
5099 	sit_i->dirty_max_mtime = 0;
5100 	up_write(&sit_i->sentry_lock);
5101 }
5102 
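/*
 * Mount-time construction of the segment manager: pull tunables from the
 * superblock and checkpoint, start the flush/discard machinery, then
 * build the SIT, free segmap, cursegs and dirty segmap before
 * sanity-checking the cursegs against the on-disk bitmaps.
 */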
5103 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5104 {
5105 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5106 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5107 	struct f2fs_sm_info *sm_info;
5108 	int err;
5109 
5110 	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5111 	if (!sm_info)
5112 		return -ENOMEM;
5113 
5114 	/* init sm info */
5115 	sbi->sm_info = sm_info;
5116 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5117 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5118 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5119 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5120 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5121 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5122 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
5123 	sm_info->rec_prefree_segments = sm_info->main_segments *
5124 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
5125 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
5126 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
5127 
5128 	if (!f2fs_lfs_mode(sbi))
5129 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
5130 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5131 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5132 	sm_info->min_seq_blocks = sbi->blocks_per_seg;
5133 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5134 	sm_info->min_ssr_sections = reserved_sections(sbi);
5135 
5136 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
5137 
5138 	init_f2fs_rwsem(&sm_info->curseg_lock);
5139 
5140 	err = f2fs_create_flush_cmd_control(sbi);
5141 	if (err)
5142 		return err;
5143 
5144 	err = create_discard_cmd_control(sbi);
5145 	if (err)
5146 		return err;
5147 
5148 	err = build_sit_info(sbi);
5149 	if (err)
5150 		return err;
5151 	err = build_free_segmap(sbi);
5152 	if (err)
5153 		return err;
5154 	err = build_curseg(sbi);
5155 	if (err)
5156 		return err;
5157 
5158 	/* reinit free segmap based on SIT */
5159 	err = build_sit_entries(sbi);
5160 	if (err)
5161 		return err;
5162 
5163 	init_free_segmap(sbi);
5164 	err = build_dirty_segmap(sbi);
5165 	if (err)
5166 		return err;
5167 
5168 	err = sanity_check_curseg(sbi);
5169 	if (err)
5170 		return err;
5171 
5172 	init_min_max_mtime(sbi);
5173 	return 0;
5174 }
5175 
5176 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5177 		enum dirty_type dirty_type)
5178 {
5179 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5180 
5181 	mutex_lock(&dirty_i->seglist_lock);
5182 	kvfree(dirty_i->dirty_segmap[dirty_type]);
5183 	dirty_i->nr_dirty[dirty_type] = 0;
5184 	mutex_unlock(&dirty_i->seglist_lock);
5185 }
5186 
5187 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5188 {
5189 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5190 
5191 	kvfree(dirty_i->pinned_secmap);
5192 	kvfree(dirty_i->victim_secmap);
5193 }
5194 
5195 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5196 {
5197 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5198 	int i;
5199 
5200 	if (!dirty_i)
5201 		return;
5202 
5203 	/* discard pre-free/dirty segments list */
5204 	for (i = 0; i < NR_DIRTY_TYPE; i++)
5205 		discard_dirty_segmap(sbi, i);
5206 
5207 	if (__is_large_section(sbi)) {
5208 		mutex_lock(&dirty_i->seglist_lock);
5209 		kvfree(dirty_i->dirty_secmap);
5210 		mutex_unlock(&dirty_i->seglist_lock);
5211 	}
5212 
5213 	destroy_victim_secmap(sbi);
5214 	SM_I(sbi)->dirty_info = NULL;
5215 	kfree(dirty_i);
5216 }
5217 
5218 static void destroy_curseg(struct f2fs_sb_info *sbi)
5219 {
5220 	struct curseg_info *array = SM_I(sbi)->curseg_array;
5221 	int i;
5222 
5223 	if (!array)
5224 		return;
5225 	SM_I(sbi)->curseg_array = NULL;
5226 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
5227 		kfree(array[i].sum_blk);
5228 		kfree(array[i].journal);
5229 	}
5230 	kfree(array);
5231 }
5232 
5233 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5234 {
5235 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5236 
5237 	if (!free_i)
5238 		return;
5239 	SM_I(sbi)->free_info = NULL;
5240 	kvfree(free_i->free_segmap);
5241 	kvfree(free_i->free_secmap);
5242 	kfree(free_i);
5243 }
5244 
5245 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5246 {
5247 	struct sit_info *sit_i = SIT_I(sbi);
5248 
5249 	if (!sit_i)
5250 		return;
5251 
5252 	if (sit_i->sentries)
5253 		kvfree(sit_i->bitmap);
5254 	kfree(sit_i->tmp_map);
5255 
5256 	kvfree(sit_i->sentries);
5257 	kvfree(sit_i->sec_entries);
5258 	kvfree(sit_i->dirty_sentries_bitmap);
5259 
5260 	SM_I(sbi)->sit_info = NULL;
5261 	kvfree(sit_i->sit_bitmap);
5262 #ifdef CONFIG_F2FS_CHECK_FS
5263 	kvfree(sit_i->sit_bitmap_mir);
5264 	kvfree(sit_i->invalid_segmap);
5265 #endif
5266 	kfree(sit_i);
5267 }
5268 
5269 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5270 {
5271 	struct f2fs_sm_info *sm_info = SM_I(sbi);
5272 
5273 	if (!sm_info)
5274 		return;
5275 	f2fs_destroy_flush_cmd_control(sbi, true);
5276 	destroy_discard_cmd_control(sbi);
5277 	destroy_dirty_segmap(sbi);
5278 	destroy_curseg(sbi);
5279 	destroy_free_segmap(sbi);
5280 	destroy_sit_info(sbi);
5281 	sbi->sm_info = NULL;
5282 	kfree(sm_info);
5283 }
5284 
5285 int __init f2fs_create_segment_manager_caches(void)
5286 {
5287 	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5288 			sizeof(struct discard_entry));
5289 	if (!discard_entry_slab)
5290 		goto fail;
5291 
5292 	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5293 			sizeof(struct discard_cmd));
5294 	if (!discard_cmd_slab)
5295 		goto destroy_discard_entry;
5296 
5297 	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5298 			sizeof(struct sit_entry_set));
5299 	if (!sit_entry_set_slab)
5300 		goto destroy_discard_cmd;
5301 
5302 	revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
5303 			sizeof(struct revoke_entry));
5304 	if (!revoke_entry_slab)
5305 		goto destroy_sit_entry_set;
5306 	return 0;
5307 
5308 destroy_sit_entry_set:
5309 	kmem_cache_destroy(sit_entry_set_slab);
5310 destroy_discard_cmd:
5311 	kmem_cache_destroy(discard_cmd_slab);
5312 destroy_discard_entry:
5313 	kmem_cache_destroy(discard_entry_slab);
5314 fail:
5315 	return -ENOMEM;
5316 }
5317 
5318 void f2fs_destroy_segment_manager_caches(void)
5319 {
5320 	kmem_cache_destroy(sit_entry_set_slab);
5321 	kmem_cache_destroy(discard_cmd_slab);
5322 	kmem_cache_destroy(discard_entry_slab);
5323 	kmem_cache_destroy(revoke_entry_slab);
5324 }
5325