xref: /linux/fs/f2fs/segment.c (revision 3a755ebcc2557e22b895b8976257f682c653db1d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/segment.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/sched/mm.h>
13 #include <linux/prefetch.h>
14 #include <linux/kthread.h>
15 #include <linux/swap.h>
16 #include <linux/timer.h>
17 #include <linux/freezer.h>
18 #include <linux/sched/signal.h>
19 #include <linux/random.h>
20 
21 #include "f2fs.h"
22 #include "segment.h"
23 #include "node.h"
24 #include "gc.h"
25 #include "iostat.h"
26 #include <trace/events/f2fs.h>
27 
28 #define __reverse_ffz(x) __reverse_ffs(~(x))
29 
30 static struct kmem_cache *discard_entry_slab;
31 static struct kmem_cache *discard_cmd_slab;
32 static struct kmem_cache *sit_entry_set_slab;
33 static struct kmem_cache *inmem_entry_slab;
34 
35 static unsigned long __reverse_ulong(unsigned char *str)
36 {
37 	unsigned long tmp = 0;
38 	int shift = 24, idx = 0;
39 
40 #if BITS_PER_LONG == 64
41 	shift = 56;
42 #endif
43 	while (shift >= 0) {
44 		tmp |= (unsigned long)str[idx++] << shift;
45 		shift -= BITS_PER_BYTE;
46 	}
47 	return tmp;
48 }
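/*
 * Illustrative trace (assuming a 64-bit build): for str = {0x80, 0x00,
 * ..., 0x00}, __reverse_ulong() returns 0x8000000000000000UL; byte 0 of
 * the bitmap becomes the most significant byte of the word, which is
 * what lets the __reverse_* helpers below scan f2fs bitmaps MSB-first.
 */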
49 
50 /*
51  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
52  * MSB and LSB are reversed in a byte by f2fs_set_bit.
53  */
54 static inline unsigned long __reverse_ffs(unsigned long word)
55 {
56 	int num = 0;
57 
58 #if BITS_PER_LONG == 64
59 	if ((word & 0xffffffff00000000UL) == 0)
60 		num += 32;
61 	else
62 		word >>= 32;
63 #endif
64 	if ((word & 0xffff0000) == 0)
65 		num += 16;
66 	else
67 		word >>= 16;
68 
69 	if ((word & 0xff00) == 0)
70 		num += 8;
71 	else
72 		word >>= 8;
73 
74 	if ((word & 0xf0) == 0)
75 		num += 4;
76 	else
77 		word >>= 4;
78 
79 	if ((word & 0xc) == 0)
80 		num += 2;
81 	else
82 		word >>= 2;
83 
84 	if ((word & 0x2) == 0)
85 		num += 1;
86 	return num;
87 }
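/*
 * Illustrative values (64-bit): __reverse_ffs(0x8000000000000000UL) == 0
 * and __reverse_ffs(0x1UL) == 63, i.e. bits are numbered from the MSB
 * down, matching the reversed layout produced by __reverse_ulong().
 */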
88 
89 /*
90  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
91  * f2fs_set_bit makes MSB and LSB reversed in a byte.
92  * @size must be an integral multiple of BITS_PER_LONG.
93  * Example:
94  *                             MSB <--> LSB
95  *   f2fs_set_bit(0, bitmap) => 1000 0000
96  *   f2fs_set_bit(7, bitmap) => 0000 0001
97  */
98 static unsigned long __find_rev_next_bit(const unsigned long *addr,
99 			unsigned long size, unsigned long offset)
100 {
101 	const unsigned long *p = addr + BIT_WORD(offset);
102 	unsigned long result = size;
103 	unsigned long tmp;
104 
105 	if (offset >= size)
106 		return size;
107 
108 	size -= (offset & ~(BITS_PER_LONG - 1));
109 	offset %= BITS_PER_LONG;
110 
111 	while (1) {
112 		if (*p == 0)
113 			goto pass;
114 
115 		tmp = __reverse_ulong((unsigned char *)p);
116 
117 		tmp &= ~0UL >> offset;
118 		if (size < BITS_PER_LONG)
119 			tmp &= (~0UL << (BITS_PER_LONG - size));
120 		if (tmp)
121 			goto found;
122 pass:
123 		if (size <= BITS_PER_LONG)
124 			break;
125 		size -= BITS_PER_LONG;
126 		offset = 0;
127 		p++;
128 	}
129 	return result;
130 found:
131 	return result - size + __reverse_ffs(tmp);
132 }
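/*
 * Illustrative example: after f2fs_set_bit(5, bitmap), byte 0 holds
 * 0000 0100 (MSB first, per the comment above). __reverse_ulong() maps
 * that to bit 58 of a 64-bit word, __reverse_ffs() sees 5 leading zero
 * bits, and __find_rev_next_bit(bitmap, 64, 0) returns 5 as expected.
 */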
133 
134 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
135 			unsigned long size, unsigned long offset)
136 {
137 	const unsigned long *p = addr + BIT_WORD(offset);
138 	unsigned long result = size;
139 	unsigned long tmp;
140 
141 	if (offset >= size)
142 		return size;
143 
144 	size -= (offset & ~(BITS_PER_LONG - 1));
145 	offset %= BITS_PER_LONG;
146 
147 	while (1) {
148 		if (*p == ~0UL)
149 			goto pass;
150 
151 		tmp = __reverse_ulong((unsigned char *)p);
152 
153 		if (offset)
154 			tmp |= ~0UL << (BITS_PER_LONG - offset);
155 		if (size < BITS_PER_LONG)
156 			tmp |= ~0UL >> size;
157 		if (tmp != ~0UL)
158 			goto found;
159 pass:
160 		if (size <= BITS_PER_LONG)
161 			break;
162 		size -= BITS_PER_LONG;
163 		offset = 0;
164 		p++;
165 	}
166 	return result;
167 found:
168 	return result - size + __reverse_ffz(tmp);
169 }
170 
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
172 {
173 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
174 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
175 	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
176 
177 	if (f2fs_lfs_mode(sbi))
178 		return false;
179 	if (sbi->gc_mode == GC_URGENT_HIGH)
180 		return true;
181 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
182 		return true;
183 
184 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
185 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
186 }
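/*
 * Illustrative arithmetic (hypothetical numbers): with node_secs = 1,
 * dent_secs = 2, imeta_secs = 1, min_ssr_sections = 2 and
 * reserved_sections() = 10, SSR is needed once free_sections(sbi) drops
 * to 1 + 2*2 + 1 + 2 + 10 = 18 or below.
 */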
187 
188 void f2fs_register_inmem_page(struct inode *inode, struct page *page)
189 {
190 	struct inmem_pages *new;
191 
192 	set_page_private_atomic(page);
193 
194 	new = f2fs_kmem_cache_alloc(inmem_entry_slab,
195 					GFP_NOFS, true, NULL);
196 
197 	/* add the atomic page to the list */
198 	new->page = page;
199 	INIT_LIST_HEAD(&new->list);
200 
201 	/* increase reference count with clean state */
202 	get_page(page);
203 	mutex_lock(&F2FS_I(inode)->inmem_lock);
204 	list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
205 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
206 	mutex_unlock(&F2FS_I(inode)->inmem_lock);
207 
208 	trace_f2fs_register_inmem_page(page, INMEM);
209 }
210 
211 static int __revoke_inmem_pages(struct inode *inode,
212 				struct list_head *head, bool drop, bool recover,
213 				bool trylock)
214 {
215 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
216 	struct inmem_pages *cur, *tmp;
217 	int err = 0;
218 
219 	list_for_each_entry_safe(cur, tmp, head, list) {
220 		struct page *page = cur->page;
221 
222 		if (drop)
223 			trace_f2fs_commit_inmem_page(page, INMEM_DROP);
224 
225 		if (trylock) {
226 			/*
227 			 * To avoid a deadlock between the page lock and
228 			 * inmem_lock.
229 			 */
230 			if (!trylock_page(page))
231 				continue;
232 		} else {
233 			lock_page(page);
234 		}
235 
236 		f2fs_wait_on_page_writeback(page, DATA, true, true);
237 
238 		if (recover) {
239 			struct dnode_of_data dn;
240 			struct node_info ni;
241 
242 			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
243 retry:
244 			set_new_dnode(&dn, inode, NULL, NULL, 0);
245 			err = f2fs_get_dnode_of_data(&dn, page->index,
246 								LOOKUP_NODE);
247 			if (err) {
248 				if (err == -ENOMEM) {
249 					memalloc_retry_wait(GFP_NOFS);
250 					goto retry;
251 				}
252 				err = -EAGAIN;
253 				goto next;
254 			}
255 
256 			err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
257 			if (err) {
258 				f2fs_put_dnode(&dn);
259 				return err;
260 			}
261 
262 			if (cur->old_addr == NEW_ADDR) {
263 				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
264 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
265 			} else
266 				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
267 					cur->old_addr, ni.version, true, true);
268 			f2fs_put_dnode(&dn);
269 		}
270 next:
271 		/* no need to invalidate this in the successful case */
272 		if (drop || recover) {
273 			ClearPageUptodate(page);
274 			clear_page_private_gcing(page);
275 		}
276 		detach_page_private(page);
277 		set_page_private(page, 0);
278 		f2fs_put_page(page, 1);
279 
280 		list_del(&cur->list);
281 		kmem_cache_free(inmem_entry_slab, cur);
282 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
283 	}
284 	return err;
285 }
286 
287 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
288 {
289 	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
290 	struct inode *inode;
291 	struct f2fs_inode_info *fi;
292 	unsigned int count = sbi->atomic_files;
293 	unsigned int looped = 0;
294 next:
295 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
296 	if (list_empty(head)) {
297 		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
298 		return;
299 	}
300 	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
301 	inode = igrab(&fi->vfs_inode);
302 	if (inode)
303 		list_move_tail(&fi->inmem_ilist, head);
304 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
305 
306 	if (inode) {
307 		if (gc_failure) {
308 			if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
309 				goto skip;
310 		}
311 		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
312 		f2fs_drop_inmem_pages(inode);
313 skip:
314 		iput(inode);
315 	}
316 	f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
317 	if (gc_failure) {
318 		if (++looped >= count)
319 			return;
320 	}
321 	goto next;
322 }
323 
324 void f2fs_drop_inmem_pages(struct inode *inode)
325 {
326 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
327 	struct f2fs_inode_info *fi = F2FS_I(inode);
328 
329 	do {
330 		mutex_lock(&fi->inmem_lock);
331 		if (list_empty(&fi->inmem_pages)) {
332 			fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
333 
334 			spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
335 			if (!list_empty(&fi->inmem_ilist))
336 				list_del_init(&fi->inmem_ilist);
337 			if (f2fs_is_atomic_file(inode)) {
338 				clear_inode_flag(inode, FI_ATOMIC_FILE);
339 				sbi->atomic_files--;
340 			}
341 			spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
342 
343 			mutex_unlock(&fi->inmem_lock);
344 			break;
345 		}
346 		__revoke_inmem_pages(inode, &fi->inmem_pages,
347 						true, false, true);
348 		mutex_unlock(&fi->inmem_lock);
349 	} while (1);
350 }
351 
352 void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
353 {
354 	struct f2fs_inode_info *fi = F2FS_I(inode);
355 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
356 	struct list_head *head = &fi->inmem_pages;
357 	struct inmem_pages *cur = NULL;
358 
359 	f2fs_bug_on(sbi, !page_private_atomic(page));
360 
361 	mutex_lock(&fi->inmem_lock);
362 	list_for_each_entry(cur, head, list) {
363 		if (cur->page == page)
364 			break;
365 	}
366 
367 	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
368 	list_del(&cur->list);
369 	mutex_unlock(&fi->inmem_lock);
370 
371 	dec_page_count(sbi, F2FS_INMEM_PAGES);
372 	kmem_cache_free(inmem_entry_slab, cur);
373 
374 	ClearPageUptodate(page);
375 	clear_page_private_atomic(page);
376 	f2fs_put_page(page, 0);
377 
378 	detach_page_private(page);
379 	set_page_private(page, 0);
380 
381 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
382 }
383 
384 static int __f2fs_commit_inmem_pages(struct inode *inode)
385 {
386 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
387 	struct f2fs_inode_info *fi = F2FS_I(inode);
388 	struct inmem_pages *cur, *tmp;
389 	struct f2fs_io_info fio = {
390 		.sbi = sbi,
391 		.ino = inode->i_ino,
392 		.type = DATA,
393 		.op = REQ_OP_WRITE,
394 		.op_flags = REQ_SYNC | REQ_PRIO,
395 		.io_type = FS_DATA_IO,
396 	};
397 	struct list_head revoke_list;
398 	bool submit_bio = false;
399 	int err = 0;
400 
401 	INIT_LIST_HEAD(&revoke_list);
402 
403 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
404 		struct page *page = cur->page;
405 
406 		lock_page(page);
407 		if (page->mapping == inode->i_mapping) {
408 			trace_f2fs_commit_inmem_page(page, INMEM);
409 
410 			f2fs_wait_on_page_writeback(page, DATA, true, true);
411 
412 			set_page_dirty(page);
413 			if (clear_page_dirty_for_io(page)) {
414 				inode_dec_dirty_pages(inode);
415 				f2fs_remove_dirty_inode(inode);
416 			}
417 retry:
418 			fio.page = page;
419 			fio.old_blkaddr = NULL_ADDR;
420 			fio.encrypted_page = NULL;
421 			fio.need_lock = LOCK_DONE;
422 			err = f2fs_do_write_data_page(&fio);
423 			if (err) {
424 				if (err == -ENOMEM) {
425 					memalloc_retry_wait(GFP_NOFS);
426 					goto retry;
427 				}
428 				unlock_page(page);
429 				break;
430 			}
431 			/* record old blkaddr for revoking */
432 			cur->old_addr = fio.old_blkaddr;
433 			submit_bio = true;
434 		}
435 		unlock_page(page);
436 		list_move_tail(&cur->list, &revoke_list);
437 	}
438 
439 	if (submit_bio)
440 		f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
441 
442 	if (err) {
443 		/*
444 		 * Try to revoke all committed pages; this could still fail
445 		 * due to lack of memory or some other reason. If so, -EAGAIN
446 		 * is returned, meaning the transaction's integrity is already
447 		 * broken and the caller should use a journal to recover, or
448 		 * rewrite and commit the last transaction. For any other
449 		 * error, revoking was done by the filesystem itself.
450 		 */
451 		err = __revoke_inmem_pages(inode, &revoke_list,
452 						false, true, false);
453 
454 		/* drop all uncommitted pages */
455 		__revoke_inmem_pages(inode, &fi->inmem_pages,
456 						true, false, false);
457 	} else {
458 		__revoke_inmem_pages(inode, &revoke_list,
459 						false, false, false);
460 	}
461 
462 	return err;
463 }
464 
465 int f2fs_commit_inmem_pages(struct inode *inode)
466 {
467 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
468 	struct f2fs_inode_info *fi = F2FS_I(inode);
469 	int err;
470 
471 	f2fs_balance_fs(sbi, true);
472 
473 	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
474 
475 	f2fs_lock_op(sbi);
476 	set_inode_flag(inode, FI_ATOMIC_COMMIT);
477 
478 	mutex_lock(&fi->inmem_lock);
479 	err = __f2fs_commit_inmem_pages(inode);
480 	mutex_unlock(&fi->inmem_lock);
481 
482 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
483 
484 	f2fs_unlock_op(sbi);
485 	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
486 
487 	return err;
488 }
489 
490 /*
491  * This function balances dirty node and dentry pages.
492  * In addition, it controls garbage collection.
493  */
494 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
495 {
496 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
497 		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
498 		f2fs_stop_checkpoint(sbi, false);
499 	}
500 
501 	/* balance_fs_bg() may be left pending */
502 	if (need && excess_cached_nats(sbi))
503 		f2fs_balance_fs_bg(sbi, false);
504 
505 	if (!f2fs_is_checkpoint_ready(sbi))
506 		return;
507 
508 	/*
509 	 * We should do GC, or end up with a checkpoint, if there are too many
510 	 * dirty dir/node pages and not enough free segments.
511 	 */
512 	if (has_not_enough_free_secs(sbi, 0, 0)) {
513 		if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
514 					sbi->gc_thread->f2fs_gc_task) {
515 			DEFINE_WAIT(wait);
516 
517 			prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
518 						TASK_UNINTERRUPTIBLE);
519 			wake_up(&sbi->gc_thread->gc_wait_queue_head);
520 			io_schedule();
521 			finish_wait(&sbi->gc_thread->fggc_wq, &wait);
522 		} else {
523 			f2fs_down_write(&sbi->gc_lock);
524 			f2fs_gc(sbi, false, false, false, NULL_SEGNO);
525 		}
526 	}
527 }
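/*
 * Note on the GC_MERGE branch above: rather than running foreground GC
 * in its own context, the caller queues itself on fggc_wq, wakes the
 * background GC thread, and sleeps in io_schedule() until that thread
 * performs the GC on its behalf and wakes the waiters.
 */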
528 
529 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
530 {
531 	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
532 	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
533 	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
534 	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
535 	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
536 	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
537 	unsigned int threshold = sbi->blocks_per_seg * factor *
538 					DEFAULT_DIRTY_THRESHOLD;
539 	unsigned int global_threshold = threshold * 3 / 2;
540 
541 	if (dents >= threshold || qdata >= threshold ||
542 		nodes >= threshold || meta >= threshold ||
543 		imeta >= threshold)
544 		return true;
545 	return dents + qdata + nodes + meta + imeta > global_threshold;
546 }
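/*
 * Illustrative arithmetic (assuming DEFAULT_DIRTY_THRESHOLD is 4): with
 * 512 blocks per segment and cp_rwsem unlocked (factor = 2), each dirty
 * type is capped at 512 * 2 * 4 = 4096 pages, and the global cap is
 * 4096 * 3 / 2 = 6144 pages before a sync is forced.
 */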
547 
548 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
549 {
550 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
551 		return;
552 
553 	/* try to shrink the extent cache when there is not enough memory */
554 	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
555 		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
556 
557 	/* check the # of cached NAT entries */
558 	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
559 		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
560 
561 	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
562 		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
563 	else
564 		f2fs_build_free_nids(sbi, false, false);
565 
566 	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
567 		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
568 		goto do_sync;
569 
570 	/* there is in-flight background IO, or recent foreground activity */
571 	if (is_inflight_io(sbi, REQ_TIME) ||
572 		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
573 		return;
574 
575 	/* the periodic checkpoint timeout threshold has been exceeded */
576 	if (f2fs_time_over(sbi, CP_TIME))
577 		goto do_sync;
578 
579 	/* checkpoint is the only way to shrink partial cached entries */
580 	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
581 		f2fs_available_free_memory(sbi, INO_ENTRIES))
582 		return;
583 
584 do_sync:
585 	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
586 		struct blk_plug plug;
587 
588 		mutex_lock(&sbi->flush_lock);
589 
590 		blk_start_plug(&plug);
591 		f2fs_sync_dirty_inodes(sbi, FILE_INODE);
592 		blk_finish_plug(&plug);
593 
594 		mutex_unlock(&sbi->flush_lock);
595 	}
596 	f2fs_sync_fs(sbi->sb, true);
597 	stat_inc_bg_cp_count(sbi->stat_info);
598 }
599 
600 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
601 				struct block_device *bdev)
602 {
603 	int ret = blkdev_issue_flush(bdev);
604 
605 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
606 				test_opt(sbi, FLUSH_MERGE), ret);
607 	return ret;
608 }
609 
610 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
611 {
612 	int ret = 0;
613 	int i;
614 
615 	if (!f2fs_is_multi_device(sbi))
616 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
617 
618 	for (i = 0; i < sbi->s_ndevs; i++) {
619 		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
620 			continue;
621 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
622 		if (ret)
623 			break;
624 	}
625 	return ret;
626 }
627 
628 static int issue_flush_thread(void *data)
629 {
630 	struct f2fs_sb_info *sbi = data;
631 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
632 	wait_queue_head_t *q = &fcc->flush_wait_queue;
633 repeat:
634 	if (kthread_should_stop())
635 		return 0;
636 
637 	if (!llist_empty(&fcc->issue_list)) {
638 		struct flush_cmd *cmd, *next;
639 		int ret;
640 
641 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
642 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
643 
644 		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
645 
646 		ret = submit_flush_wait(sbi, cmd->ino);
647 		atomic_inc(&fcc->issued_flush);
648 
649 		llist_for_each_entry_safe(cmd, next,
650 					  fcc->dispatch_list, llnode) {
651 			cmd->ret = ret;
652 			complete(&cmd->wait);
653 		}
654 		fcc->dispatch_list = NULL;
655 	}
656 
657 	wait_event_interruptible(*q,
658 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
659 	goto repeat;
660 }
661 
662 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
663 {
664 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
665 	struct flush_cmd cmd;
666 	int ret;
667 
668 	if (test_opt(sbi, NOBARRIER))
669 		return 0;
670 
671 	if (!test_opt(sbi, FLUSH_MERGE)) {
672 		atomic_inc(&fcc->queued_flush);
673 		ret = submit_flush_wait(sbi, ino);
674 		atomic_dec(&fcc->queued_flush);
675 		atomic_inc(&fcc->issued_flush);
676 		return ret;
677 	}
678 
679 	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
680 	    f2fs_is_multi_device(sbi)) {
681 		ret = submit_flush_wait(sbi, ino);
682 		atomic_dec(&fcc->queued_flush);
683 
684 		atomic_inc(&fcc->issued_flush);
685 		return ret;
686 	}
687 
688 	cmd.ino = ino;
689 	init_completion(&cmd.wait);
690 
691 	llist_add(&cmd.llnode, &fcc->issue_list);
692 
693 	/*
694 	 * Update issue_list before waking up the issue_flush thread; this
695 	 * smp_mb() pairs with another barrier in ___wait_event(). See the
696 	 * comments of waitqueue_active() for more details.
697 	 */
698 	smp_mb();
699 
700 	if (waitqueue_active(&fcc->flush_wait_queue))
701 		wake_up(&fcc->flush_wait_queue);
702 
703 	if (fcc->f2fs_issue_flush) {
704 		wait_for_completion(&cmd.wait);
705 		atomic_dec(&fcc->queued_flush);
706 	} else {
707 		struct llist_node *list;
708 
709 		list = llist_del_all(&fcc->issue_list);
710 		if (!list) {
711 			wait_for_completion(&cmd.wait);
712 			atomic_dec(&fcc->queued_flush);
713 		} else {
714 			struct flush_cmd *tmp, *next;
715 
716 			ret = submit_flush_wait(sbi, ino);
717 
718 			llist_for_each_entry_safe(tmp, next, list, llnode) {
719 				if (tmp == &cmd) {
720 					cmd.ret = ret;
721 					atomic_dec(&fcc->queued_flush);
722 					continue;
723 				}
724 				tmp->ret = ret;
725 				complete(&tmp->wait);
726 			}
727 		}
728 	}
729 
730 	return cmd.ret;
731 }
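/*
 * Sketch of the FLUSH_MERGE path above: the first flusher (or any
 * flusher on a multi-device setup) issues the flush directly; later
 * concurrent callers add a flush_cmd to fcc->issue_list, and a single
 * flush, issued by the kthread or by one waiter if the kthread is gone,
 * completes every queued command with the same return value.
 */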
732 
733 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
734 {
735 	dev_t dev = sbi->sb->s_bdev->bd_dev;
736 	struct flush_cmd_control *fcc;
737 	int err = 0;
738 
739 	if (SM_I(sbi)->fcc_info) {
740 		fcc = SM_I(sbi)->fcc_info;
741 		if (fcc->f2fs_issue_flush)
742 			return err;
743 		goto init_thread;
744 	}
745 
746 	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
747 	if (!fcc)
748 		return -ENOMEM;
749 	atomic_set(&fcc->issued_flush, 0);
750 	atomic_set(&fcc->queued_flush, 0);
751 	init_waitqueue_head(&fcc->flush_wait_queue);
752 	init_llist_head(&fcc->issue_list);
753 	SM_I(sbi)->fcc_info = fcc;
754 	if (!test_opt(sbi, FLUSH_MERGE))
755 		return err;
756 
757 init_thread:
758 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
759 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
760 	if (IS_ERR(fcc->f2fs_issue_flush)) {
761 		err = PTR_ERR(fcc->f2fs_issue_flush);
762 		kfree(fcc);
763 		SM_I(sbi)->fcc_info = NULL;
764 		return err;
765 	}
766 
767 	return err;
768 }
769 
770 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
771 {
772 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
773 
774 	if (fcc && fcc->f2fs_issue_flush) {
775 		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
776 
777 		fcc->f2fs_issue_flush = NULL;
778 		kthread_stop(flush_thread);
779 	}
780 	if (free) {
781 		kfree(fcc);
782 		SM_I(sbi)->fcc_info = NULL;
783 	}
784 }
785 
786 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
787 {
788 	int ret = 0, i;
789 
790 	if (!f2fs_is_multi_device(sbi))
791 		return 0;
792 
793 	if (test_opt(sbi, NOBARRIER))
794 		return 0;
795 
796 	for (i = 1; i < sbi->s_ndevs; i++) {
797 		int count = DEFAULT_RETRY_IO_COUNT;
798 
799 		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
800 			continue;
801 
802 		do {
803 			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
804 			if (ret)
805 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
806 		} while (ret && --count);
807 
808 		if (ret) {
809 			f2fs_stop_checkpoint(sbi, false);
810 			break;
811 		}
812 
813 		spin_lock(&sbi->dev_lock);
814 		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
815 		spin_unlock(&sbi->dev_lock);
816 	}
817 
818 	return ret;
819 }
820 
821 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
822 		enum dirty_type dirty_type)
823 {
824 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
825 
826 	/* need not be added */
827 	if (IS_CURSEG(sbi, segno))
828 		return;
829 
830 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
831 		dirty_i->nr_dirty[dirty_type]++;
832 
833 	if (dirty_type == DIRTY) {
834 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
835 		enum dirty_type t = sentry->type;
836 
837 		if (unlikely(t >= DIRTY)) {
838 			f2fs_bug_on(sbi, 1);
839 			return;
840 		}
841 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
842 			dirty_i->nr_dirty[t]++;
843 
844 		if (__is_large_section(sbi)) {
845 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
846 			block_t valid_blocks =
847 				get_valid_blocks(sbi, segno, true);
848 
849 			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
850 					valid_blocks == BLKS_PER_SEC(sbi)));
851 
852 			if (!IS_CURSEC(sbi, secno))
853 				set_bit(secno, dirty_i->dirty_secmap);
854 		}
855 	}
856 }
857 
858 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
859 		enum dirty_type dirty_type)
860 {
861 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
862 	block_t valid_blocks;
863 
864 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
865 		dirty_i->nr_dirty[dirty_type]--;
866 
867 	if (dirty_type == DIRTY) {
868 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
869 		enum dirty_type t = sentry->type;
870 
871 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
872 			dirty_i->nr_dirty[t]--;
873 
874 		valid_blocks = get_valid_blocks(sbi, segno, true);
875 		if (valid_blocks == 0) {
876 			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
877 						dirty_i->victim_secmap);
878 #ifdef CONFIG_F2FS_CHECK_FS
879 			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
880 #endif
881 		}
882 		if (__is_large_section(sbi)) {
883 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
884 
885 			if (!valid_blocks ||
886 					valid_blocks == BLKS_PER_SEC(sbi)) {
887 				clear_bit(secno, dirty_i->dirty_secmap);
888 				return;
889 			}
890 
891 			if (!IS_CURSEC(sbi, secno))
892 				set_bit(secno, dirty_i->dirty_secmap);
893 		}
894 	}
895 }
896 
897 /*
898  * This should not fail with an error such as -ENOMEM.
899  * Adding a dirty entry into the seglist is not a critical operation.
900  * If a given segment is one of the current working segments, it won't be added.
901  */
902 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
903 {
904 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
905 	unsigned short valid_blocks, ckpt_valid_blocks;
906 	unsigned int usable_blocks;
907 
908 	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
909 		return;
910 
911 	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
912 	mutex_lock(&dirty_i->seglist_lock);
913 
914 	valid_blocks = get_valid_blocks(sbi, segno, false);
915 	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
916 
917 	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
918 		ckpt_valid_blocks == usable_blocks)) {
919 		__locate_dirty_segment(sbi, segno, PRE);
920 		__remove_dirty_segment(sbi, segno, DIRTY);
921 	} else if (valid_blocks < usable_blocks) {
922 		__locate_dirty_segment(sbi, segno, DIRTY);
923 	} else {
924 		/* Recovery routine with SSR needs this */
925 		__remove_dirty_segment(sbi, segno, DIRTY);
926 	}
927 
928 	mutex_unlock(&dirty_i->seglist_lock);
929 }
930 
931 /* This moves currently empty dirty segments to prefree. Takes seglist_lock itself. */
932 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
933 {
934 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
935 	unsigned int segno;
936 
937 	mutex_lock(&dirty_i->seglist_lock);
938 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
939 		if (get_valid_blocks(sbi, segno, false))
940 			continue;
941 		if (IS_CURSEG(sbi, segno))
942 			continue;
943 		__locate_dirty_segment(sbi, segno, PRE);
944 		__remove_dirty_segment(sbi, segno, DIRTY);
945 	}
946 	mutex_unlock(&dirty_i->seglist_lock);
947 }
948 
949 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
950 {
951 	int ovp_hole_segs =
952 		(overprovision_segments(sbi) - reserved_segments(sbi));
953 	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
954 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
955 	block_t holes[2] = {0, 0};	/* DATA and NODE */
956 	block_t unusable;
957 	struct seg_entry *se;
958 	unsigned int segno;
959 
960 	mutex_lock(&dirty_i->seglist_lock);
961 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
962 		se = get_seg_entry(sbi, segno);
963 		if (IS_NODESEG(se->type))
964 			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
965 							se->valid_blocks;
966 		else
967 			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
968 							se->valid_blocks;
969 	}
970 	mutex_unlock(&dirty_i->seglist_lock);
971 
972 	unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
973 	if (unusable > ovp_holes)
974 		return unusable - ovp_holes;
975 	return 0;
976 }
977 
978 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
979 {
980 	int ovp_hole_segs =
981 		(overprovision_segments(sbi) - reserved_segments(sbi));
982 	if (unusable > F2FS_OPTION(sbi).unusable_cap)
983 		return -EAGAIN;
984 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
985 		dirty_segments(sbi) > ovp_hole_segs)
986 		return -EAGAIN;
987 	return 0;
988 }
989 
990 /* This is only used by SBI_CP_DISABLED */
991 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
992 {
993 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
994 	unsigned int segno = 0;
995 
996 	mutex_lock(&dirty_i->seglist_lock);
997 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
998 		if (get_valid_blocks(sbi, segno, false))
999 			continue;
1000 		if (get_ckpt_valid_blocks(sbi, segno, false))
1001 			continue;
1002 		mutex_unlock(&dirty_i->seglist_lock);
1003 		return segno;
1004 	}
1005 	mutex_unlock(&dirty_i->seglist_lock);
1006 	return NULL_SEGNO;
1007 }
1008 
1009 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
1010 		struct block_device *bdev, block_t lstart,
1011 		block_t start, block_t len)
1012 {
1013 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1014 	struct list_head *pend_list;
1015 	struct discard_cmd *dc;
1016 
1017 	f2fs_bug_on(sbi, !len);
1018 
1019 	pend_list = &dcc->pend_list[plist_idx(len)];
1020 
1021 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
1022 	INIT_LIST_HEAD(&dc->list);
1023 	dc->bdev = bdev;
1024 	dc->lstart = lstart;
1025 	dc->start = start;
1026 	dc->len = len;
1027 	dc->ref = 0;
1028 	dc->state = D_PREP;
1029 	dc->queued = 0;
1030 	dc->error = 0;
1031 	init_completion(&dc->wait);
1032 	list_add_tail(&dc->list, pend_list);
1033 	spin_lock_init(&dc->lock);
1034 	dc->bio_ref = 0;
1035 	atomic_inc(&dcc->discard_cmd_cnt);
1036 	dcc->undiscard_blks += len;
1037 
1038 	return dc;
1039 }
1040 
1041 static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
1042 				struct block_device *bdev, block_t lstart,
1043 				block_t start, block_t len,
1044 				struct rb_node *parent, struct rb_node **p,
1045 				bool leftmost)
1046 {
1047 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1048 	struct discard_cmd *dc;
1049 
1050 	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1051 
1052 	rb_link_node(&dc->rb_node, parent, p);
1053 	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
1054 
1055 	return dc;
1056 }
1057 
1058 static void __detach_discard_cmd(struct discard_cmd_control *dcc,
1059 							struct discard_cmd *dc)
1060 {
1061 	if (dc->state == D_DONE)
1062 		atomic_sub(dc->queued, &dcc->queued_discard);
1063 
1064 	list_del(&dc->list);
1065 	rb_erase_cached(&dc->rb_node, &dcc->root);
1066 	dcc->undiscard_blks -= dc->len;
1067 
1068 	kmem_cache_free(discard_cmd_slab, dc);
1069 
1070 	atomic_dec(&dcc->discard_cmd_cnt);
1071 }
1072 
1073 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1074 							struct discard_cmd *dc)
1075 {
1076 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1077 	unsigned long flags;
1078 
1079 	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
1080 
1081 	spin_lock_irqsave(&dc->lock, flags);
1082 	if (dc->bio_ref) {
1083 		spin_unlock_irqrestore(&dc->lock, flags);
1084 		return;
1085 	}
1086 	spin_unlock_irqrestore(&dc->lock, flags);
1087 
1088 	f2fs_bug_on(sbi, dc->ref);
1089 
1090 	if (dc->error == -EOPNOTSUPP)
1091 		dc->error = 0;
1092 
1093 	if (dc->error)
1094 		printk_ratelimited(
1095 			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
1096 			KERN_INFO, sbi->sb->s_id,
1097 			dc->lstart, dc->start, dc->len, dc->error);
1098 	__detach_discard_cmd(dcc, dc);
1099 }
1100 
1101 static void f2fs_submit_discard_endio(struct bio *bio)
1102 {
1103 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
1104 	unsigned long flags;
1105 
1106 	spin_lock_irqsave(&dc->lock, flags);
1107 	if (!dc->error)
1108 		dc->error = blk_status_to_errno(bio->bi_status);
1109 	dc->bio_ref--;
1110 	if (!dc->bio_ref && dc->state == D_SUBMIT) {
1111 		dc->state = D_DONE;
1112 		complete_all(&dc->wait);
1113 	}
1114 	spin_unlock_irqrestore(&dc->lock, flags);
1115 	bio_put(bio);
1116 }
1117 
1118 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1119 				block_t start, block_t end)
1120 {
1121 #ifdef CONFIG_F2FS_CHECK_FS
1122 	struct seg_entry *sentry;
1123 	unsigned int segno;
1124 	block_t blk = start;
1125 	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
1126 	unsigned long *map;
1127 
1128 	while (blk < end) {
1129 		segno = GET_SEGNO(sbi, blk);
1130 		sentry = get_seg_entry(sbi, segno);
1131 		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1132 
1133 		if (end < START_BLOCK(sbi, segno + 1))
1134 			size = GET_BLKOFF_FROM_SEG0(sbi, end);
1135 		else
1136 			size = max_blocks;
1137 		map = (unsigned long *)(sentry->cur_valid_map);
1138 		offset = __find_rev_next_bit(map, size, offset);
1139 		f2fs_bug_on(sbi, offset != size);
1140 		blk = START_BLOCK(sbi, segno + 1);
1141 	}
1142 #endif
1143 }
1144 
1145 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1146 				struct discard_policy *dpolicy,
1147 				int discard_type, unsigned int granularity)
1148 {
1149 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1150 
1151 	/* common policy */
1152 	dpolicy->type = discard_type;
1153 	dpolicy->sync = true;
1154 	dpolicy->ordered = false;
1155 	dpolicy->granularity = granularity;
1156 
1157 	dpolicy->max_requests = dcc->max_discard_request;
1158 	dpolicy->io_aware_gran = MAX_PLIST_NUM;
1159 	dpolicy->timeout = false;
1160 
1161 	if (discard_type == DPOLICY_BG) {
1162 		dpolicy->min_interval = dcc->min_discard_issue_time;
1163 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1164 		dpolicy->max_interval = dcc->max_discard_issue_time;
1165 		dpolicy->io_aware = true;
1166 		dpolicy->sync = false;
1167 		dpolicy->ordered = true;
1168 		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
1169 			dpolicy->granularity = 1;
1170 			if (atomic_read(&dcc->discard_cmd_cnt))
1171 				dpolicy->max_interval =
1172 					dcc->min_discard_issue_time;
1173 		}
1174 	} else if (discard_type == DPOLICY_FORCE) {
1175 		dpolicy->min_interval = dcc->min_discard_issue_time;
1176 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1177 		dpolicy->max_interval = dcc->max_discard_issue_time;
1178 		dpolicy->io_aware = false;
1179 	} else if (discard_type == DPOLICY_FSTRIM) {
1180 		dpolicy->io_aware = false;
1181 	} else if (discard_type == DPOLICY_UMOUNT) {
1182 		dpolicy->io_aware = false;
1183 		/* we need to issue all to keep CP_TRIMMED_FLAG */
1184 		dpolicy->granularity = 1;
1185 		dpolicy->timeout = true;
1186 	}
1187 }
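/*
 * Summary of the policies set up above: DPOLICY_BG is async, ordered and
 * IO-aware (it backs off when the device is busy) and drops its
 * granularity to 1 under high utilization; DPOLICY_FORCE keeps the BG
 * intervals but ignores device idleness; DPOLICY_FSTRIM ignores idleness
 * as well; DPOLICY_UMOUNT additionally issues at granularity 1 under a
 * timeout so that CP_TRIMMED_FLAG can be kept.
 */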
1188 
1189 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1190 				struct block_device *bdev, block_t lstart,
1191 				block_t start, block_t len);
1192 /* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
1193 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1194 						struct discard_policy *dpolicy,
1195 						struct discard_cmd *dc,
1196 						unsigned int *issued)
1197 {
1198 	struct block_device *bdev = dc->bdev;
1199 	unsigned int max_discard_blocks =
1200 			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1201 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1202 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1203 					&(dcc->fstrim_list) : &(dcc->wait_list);
1204 	int flag = dpolicy->sync ? REQ_SYNC : 0;
1205 	block_t lstart, start, len, total_len;
1206 	int err = 0;
1207 
1208 	if (dc->state != D_PREP)
1209 		return 0;
1210 
1211 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1212 		return 0;
1213 
1214 	trace_f2fs_issue_discard(bdev, dc->start, dc->len);
1215 
1216 	lstart = dc->lstart;
1217 	start = dc->start;
1218 	len = dc->len;
1219 	total_len = len;
1220 
1221 	dc->len = 0;
1222 
1223 	while (total_len && *issued < dpolicy->max_requests && !err) {
1224 		struct bio *bio = NULL;
1225 		unsigned long flags;
1226 		bool last = true;
1227 
1228 		if (len > max_discard_blocks) {
1229 			len = max_discard_blocks;
1230 			last = false;
1231 		}
1232 
1233 		(*issued)++;
1234 		if (*issued == dpolicy->max_requests)
1235 			last = true;
1236 
1237 		dc->len += len;
1238 
1239 		if (time_to_inject(sbi, FAULT_DISCARD)) {
1240 			f2fs_show_injection_info(sbi, FAULT_DISCARD);
1241 			err = -EIO;
1242 			goto submit;
1243 		}
1244 		err = __blkdev_issue_discard(bdev,
1245 					SECTOR_FROM_BLOCK(start),
1246 					SECTOR_FROM_BLOCK(len),
1247 					GFP_NOFS, &bio);
1248 submit:
1249 		if (err) {
1250 			spin_lock_irqsave(&dc->lock, flags);
1251 			if (dc->state == D_PARTIAL)
1252 				dc->state = D_SUBMIT;
1253 			spin_unlock_irqrestore(&dc->lock, flags);
1254 
1255 			break;
1256 		}
1257 
1258 		f2fs_bug_on(sbi, !bio);
1259 
1260 		/*
1261 		 * must be set before submission to avoid the state
1262 		 * becoming D_DONE right away
1263 		 */
1264 		spin_lock_irqsave(&dc->lock, flags);
1265 		if (last)
1266 			dc->state = D_SUBMIT;
1267 		else
1268 			dc->state = D_PARTIAL;
1269 		dc->bio_ref++;
1270 		spin_unlock_irqrestore(&dc->lock, flags);
1271 
1272 		atomic_inc(&dcc->queued_discard);
1273 		dc->queued++;
1274 		list_move_tail(&dc->list, wait_list);
1275 
1276 		/* sanity check on discard range */
1277 		__check_sit_bitmap(sbi, lstart, lstart + len);
1278 
1279 		bio->bi_private = dc;
1280 		bio->bi_end_io = f2fs_submit_discard_endio;
1281 		bio->bi_opf |= flag;
1282 		submit_bio(bio);
1283 
1284 		atomic_inc(&dcc->issued_discard);
1285 
1286 		f2fs_update_iostat(sbi, FS_DISCARD, 1);
1287 
1288 		lstart += len;
1289 		start += len;
1290 		total_len -= len;
1291 		len = total_len;
1292 	}
1293 
1294 	if (!err && len) {
1295 		dcc->undiscard_blks -= len;
1296 		__update_discard_tree_range(sbi, bdev, lstart, start, len);
1297 	}
1298 	return err;
1299 }
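/*
 * Illustrative split (hypothetical numbers): with max_discard_blocks =
 * 128 and dc->len = 300, the loop above issues bios for 128 + 128 + 44
 * blocks; the first two leave the command in D_PARTIAL and the final one
 * moves it to D_SUBMIT, so the endio only signals D_DONE once every bio
 * has completed.
 */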
1300 
1301 static void __insert_discard_tree(struct f2fs_sb_info *sbi,
1302 				struct block_device *bdev, block_t lstart,
1303 				block_t start, block_t len,
1304 				struct rb_node **insert_p,
1305 				struct rb_node *insert_parent)
1306 {
1307 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1308 	struct rb_node **p;
1309 	struct rb_node *parent = NULL;
1310 	bool leftmost = true;
1311 
1312 	if (insert_p && insert_parent) {
1313 		parent = insert_parent;
1314 		p = insert_p;
1315 		goto do_insert;
1316 	}
1317 
1318 	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
1319 							lstart, &leftmost);
1320 do_insert:
1321 	__attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
1322 								p, leftmost);
1323 }
1324 
1325 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1326 						struct discard_cmd *dc)
1327 {
1328 	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
1329 }
1330 
1331 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1332 				struct discard_cmd *dc, block_t blkaddr)
1333 {
1334 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1335 	struct discard_info di = dc->di;
1336 	bool modified = false;
1337 
1338 	if (dc->state == D_DONE || dc->len == 1) {
1339 		__remove_discard_cmd(sbi, dc);
1340 		return;
1341 	}
1342 
1343 	dcc->undiscard_blks -= di.len;
1344 
1345 	if (blkaddr > di.lstart) {
1346 		dc->len = blkaddr - dc->lstart;
1347 		dcc->undiscard_blks += dc->len;
1348 		__relocate_discard_cmd(dcc, dc);
1349 		modified = true;
1350 	}
1351 
1352 	if (blkaddr < di.lstart + di.len - 1) {
1353 		if (modified) {
1354 			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
1355 					di.start + blkaddr + 1 - di.lstart,
1356 					di.lstart + di.len - 1 - blkaddr,
1357 					NULL, NULL);
1358 		} else {
1359 			dc->lstart++;
1360 			dc->len--;
1361 			dc->start++;
1362 			dcc->undiscard_blks += dc->len;
1363 			__relocate_discard_cmd(dcc, dc);
1364 		}
1365 	}
1366 }
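/*
 * Illustrative punch (hypothetical numbers): for a prepared command
 * covering lstart 100..109 (len 10), punching blkaddr 104 shrinks the
 * command to 100..103 (len 4) and inserts a new one for 105..109
 * (len 5), leaving only block 104 un-discarded.
 */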
1367 
1368 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1369 				struct block_device *bdev, block_t lstart,
1370 				block_t start, block_t len)
1371 {
1372 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1373 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1374 	struct discard_cmd *dc;
1375 	struct discard_info di = {0};
1376 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1377 	unsigned int max_discard_blocks =
1378 			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1379 	block_t end = lstart + len;
1380 
1381 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1382 					NULL, lstart,
1383 					(struct rb_entry **)&prev_dc,
1384 					(struct rb_entry **)&next_dc,
1385 					&insert_p, &insert_parent, true, NULL);
1386 	if (dc)
1387 		prev_dc = dc;
1388 
1389 	if (!prev_dc) {
1390 		di.lstart = lstart;
1391 		di.len = next_dc ? next_dc->lstart - lstart : len;
1392 		di.len = min(di.len, len);
1393 		di.start = start;
1394 	}
1395 
1396 	while (1) {
1397 		struct rb_node *node;
1398 		bool merged = false;
1399 		struct discard_cmd *tdc = NULL;
1400 
1401 		if (prev_dc) {
1402 			di.lstart = prev_dc->lstart + prev_dc->len;
1403 			if (di.lstart < lstart)
1404 				di.lstart = lstart;
1405 			if (di.lstart >= end)
1406 				break;
1407 
1408 			if (!next_dc || next_dc->lstart > end)
1409 				di.len = end - di.lstart;
1410 			else
1411 				di.len = next_dc->lstart - di.lstart;
1412 			di.start = start + di.lstart - lstart;
1413 		}
1414 
1415 		if (!di.len)
1416 			goto next;
1417 
1418 		if (prev_dc && prev_dc->state == D_PREP &&
1419 			prev_dc->bdev == bdev &&
1420 			__is_discard_back_mergeable(&di, &prev_dc->di,
1421 							max_discard_blocks)) {
1422 			prev_dc->di.len += di.len;
1423 			dcc->undiscard_blks += di.len;
1424 			__relocate_discard_cmd(dcc, prev_dc);
1425 			di = prev_dc->di;
1426 			tdc = prev_dc;
1427 			merged = true;
1428 		}
1429 
1430 		if (next_dc && next_dc->state == D_PREP &&
1431 			next_dc->bdev == bdev &&
1432 			__is_discard_front_mergeable(&di, &next_dc->di,
1433 							max_discard_blocks)) {
1434 			next_dc->di.lstart = di.lstart;
1435 			next_dc->di.len += di.len;
1436 			next_dc->di.start = di.start;
1437 			dcc->undiscard_blks += di.len;
1438 			__relocate_discard_cmd(dcc, next_dc);
1439 			if (tdc)
1440 				__remove_discard_cmd(sbi, tdc);
1441 			merged = true;
1442 		}
1443 
1444 		if (!merged) {
1445 			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
1446 							di.len, NULL, NULL);
1447 		}
1448 next:
1449 		prev_dc = next_dc;
1450 		if (!prev_dc)
1451 			break;
1452 
1453 		node = rb_next(&prev_dc->rb_node);
1454 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1455 	}
1456 }
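/*
 * Merge behaviour of the loop above: each uncovered gap is either
 * back-merged into a mergeable D_PREP predecessor, front-merged into a
 * mergeable successor (removing the predecessor via tdc when both
 * happen, so the three ranges collapse into one command), or inserted
 * as a fresh discard command.
 */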
1457 
1458 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
1459 		struct block_device *bdev, block_t blkstart, block_t blklen)
1460 {
1461 	block_t lblkstart = blkstart;
1462 
1463 	if (!f2fs_bdev_support_discard(bdev))
1464 		return 0;
1465 
1466 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
1467 
1468 	if (f2fs_is_multi_device(sbi)) {
1469 		int devi = f2fs_target_device_index(sbi, blkstart);
1470 
1471 		blkstart -= FDEV(devi).start_blk;
1472 	}
1473 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1474 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1475 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1476 	return 0;
1477 }
1478 
1479 static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1480 					struct discard_policy *dpolicy)
1481 {
1482 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1483 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1484 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1485 	struct discard_cmd *dc;
1486 	struct blk_plug plug;
1487 	unsigned int pos = dcc->next_pos;
1488 	unsigned int issued = 0;
1489 	bool io_interrupted = false;
1490 
1491 	mutex_lock(&dcc->cmd_lock);
1492 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1493 					NULL, pos,
1494 					(struct rb_entry **)&prev_dc,
1495 					(struct rb_entry **)&next_dc,
1496 					&insert_p, &insert_parent, true, NULL);
1497 	if (!dc)
1498 		dc = next_dc;
1499 
1500 	blk_start_plug(&plug);
1501 
1502 	while (dc) {
1503 		struct rb_node *node;
1504 		int err = 0;
1505 
1506 		if (dc->state != D_PREP)
1507 			goto next;
1508 
1509 		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1510 			io_interrupted = true;
1511 			break;
1512 		}
1513 
1514 		dcc->next_pos = dc->lstart + dc->len;
1515 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1516 
1517 		if (issued >= dpolicy->max_requests)
1518 			break;
1519 next:
1520 		node = rb_next(&dc->rb_node);
1521 		if (err)
1522 			__remove_discard_cmd(sbi, dc);
1523 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1524 	}
1525 
1526 	blk_finish_plug(&plug);
1527 
1528 	if (!dc)
1529 		dcc->next_pos = 0;
1530 
1531 	mutex_unlock(&dcc->cmd_lock);
1532 
1533 	if (!issued && io_interrupted)
1534 		issued = -1;
1535 
1536 	return issued;
1537 }
1538 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1539 					struct discard_policy *dpolicy);
1540 
1541 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1542 					struct discard_policy *dpolicy)
1543 {
1544 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1545 	struct list_head *pend_list;
1546 	struct discard_cmd *dc, *tmp;
1547 	struct blk_plug plug;
1548 	int i, issued;
1549 	bool io_interrupted = false;
1550 
1551 	if (dpolicy->timeout)
1552 		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1553 
1554 retry:
1555 	issued = 0;
1556 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1557 		if (dpolicy->timeout &&
1558 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1559 			break;
1560 
1561 		if (i + 1 < dpolicy->granularity)
1562 			break;
1563 
1564 		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
1565 			return __issue_discard_cmd_orderly(sbi, dpolicy);
1566 
1567 		pend_list = &dcc->pend_list[i];
1568 
1569 		mutex_lock(&dcc->cmd_lock);
1570 		if (list_empty(pend_list))
1571 			goto next;
1572 		if (unlikely(dcc->rbtree_check))
1573 			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
1574 							&dcc->root, false));
1575 		blk_start_plug(&plug);
1576 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1577 			f2fs_bug_on(sbi, dc->state != D_PREP);
1578 
1579 			if (dpolicy->timeout &&
1580 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1581 				break;
1582 
1583 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1584 						!is_idle(sbi, DISCARD_TIME)) {
1585 				io_interrupted = true;
1586 				break;
1587 			}
1588 
1589 			__submit_discard_cmd(sbi, dpolicy, dc, &issued);
1590 
1591 			if (issued >= dpolicy->max_requests)
1592 				break;
1593 		}
1594 		blk_finish_plug(&plug);
1595 next:
1596 		mutex_unlock(&dcc->cmd_lock);
1597 
1598 		if (issued >= dpolicy->max_requests || io_interrupted)
1599 			break;
1600 	}
1601 
1602 	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1603 		__wait_all_discard_cmd(sbi, dpolicy);
1604 		goto retry;
1605 	}
1606 
1607 	if (!issued && io_interrupted)
1608 		issued = -1;
1609 
1610 	return issued;
1611 }
1612 
1613 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1614 {
1615 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1616 	struct list_head *pend_list;
1617 	struct discard_cmd *dc, *tmp;
1618 	int i;
1619 	bool dropped = false;
1620 
1621 	mutex_lock(&dcc->cmd_lock);
1622 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1623 		pend_list = &dcc->pend_list[i];
1624 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1625 			f2fs_bug_on(sbi, dc->state != D_PREP);
1626 			__remove_discard_cmd(sbi, dc);
1627 			dropped = true;
1628 		}
1629 	}
1630 	mutex_unlock(&dcc->cmd_lock);
1631 
1632 	return dropped;
1633 }
1634 
1635 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1636 {
1637 	__drop_discard_cmd(sbi);
1638 }
1639 
1640 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1641 							struct discard_cmd *dc)
1642 {
1643 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1644 	unsigned int len = 0;
1645 
1646 	wait_for_completion_io(&dc->wait);
1647 	mutex_lock(&dcc->cmd_lock);
1648 	f2fs_bug_on(sbi, dc->state != D_DONE);
1649 	dc->ref--;
1650 	if (!dc->ref) {
1651 		if (!dc->error)
1652 			len = dc->len;
1653 		__remove_discard_cmd(sbi, dc);
1654 	}
1655 	mutex_unlock(&dcc->cmd_lock);
1656 
1657 	return len;
1658 }
1659 
1660 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1661 						struct discard_policy *dpolicy,
1662 						block_t start, block_t end)
1663 {
1664 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1665 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1666 					&(dcc->fstrim_list) : &(dcc->wait_list);
1667 	struct discard_cmd *dc, *tmp;
1668 	bool need_wait;
1669 	unsigned int trimmed = 0;
1670 
1671 next:
1672 	need_wait = false;
1673 
1674 	mutex_lock(&dcc->cmd_lock);
1675 	list_for_each_entry_safe(dc, tmp, wait_list, list) {
1676 		if (dc->lstart + dc->len <= start || end <= dc->lstart)
1677 			continue;
1678 		if (dc->len < dpolicy->granularity)
1679 			continue;
1680 		if (dc->state == D_DONE && !dc->ref) {
1681 			wait_for_completion_io(&dc->wait);
1682 			if (!dc->error)
1683 				trimmed += dc->len;
1684 			__remove_discard_cmd(sbi, dc);
1685 		} else {
1686 			dc->ref++;
1687 			need_wait = true;
1688 			break;
1689 		}
1690 	}
1691 	mutex_unlock(&dcc->cmd_lock);
1692 
1693 	if (need_wait) {
1694 		trimmed += __wait_one_discard_bio(sbi, dc);
1695 		goto next;
1696 	}
1697 
1698 	return trimmed;
1699 }
1700 
1701 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1702 						struct discard_policy *dpolicy)
1703 {
1704 	struct discard_policy dp;
1705 	unsigned int discard_blks;
1706 
1707 	if (dpolicy)
1708 		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1709 
1710 	/* wait all */
1711 	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
1712 	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1713 	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
1714 	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1715 
1716 	return discard_blks;
1717 }
1718 
1719 /* This should be covered by the global mutex, &sit_i->sentry_lock */
1720 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1721 {
1722 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1723 	struct discard_cmd *dc;
1724 	bool need_wait = false;
1725 
1726 	mutex_lock(&dcc->cmd_lock);
1727 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
1728 							NULL, blkaddr);
1729 	if (dc) {
1730 		if (dc->state == D_PREP) {
1731 			__punch_discard_cmd(sbi, dc, blkaddr);
1732 		} else {
1733 			dc->ref++;
1734 			need_wait = true;
1735 		}
1736 	}
1737 	mutex_unlock(&dcc->cmd_lock);
1738 
1739 	if (need_wait)
1740 		__wait_one_discard_bio(sbi, dc);
1741 }
1742 
1743 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1744 {
1745 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1746 
1747 	if (dcc && dcc->f2fs_issue_discard) {
1748 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1749 
1750 		dcc->f2fs_issue_discard = NULL;
1751 		kthread_stop(discard_thread);
1752 	}
1753 }
1754 
1755 /* This comes from f2fs_put_super */
1756 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1757 {
1758 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1759 	struct discard_policy dpolicy;
1760 	bool dropped;
1761 
1762 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1763 					dcc->discard_granularity);
1764 	__issue_discard_cmd(sbi, &dpolicy);
1765 	dropped = __drop_discard_cmd(sbi);
1766 
1767 	/* just to make sure there are no pending discard commands */
1768 	__wait_all_discard_cmd(sbi, NULL);
1769 
1770 	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1771 	return dropped;
1772 }
1773 
1774 static int issue_discard_thread(void *data)
1775 {
1776 	struct f2fs_sb_info *sbi = data;
1777 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1778 	wait_queue_head_t *q = &dcc->discard_wait_queue;
1779 	struct discard_policy dpolicy;
1780 	unsigned int wait_ms = dcc->min_discard_issue_time;
1781 	int issued;
1782 
1783 	set_freezable();
1784 
1785 	do {
1786 		if (sbi->gc_mode == GC_URGENT_HIGH ||
1787 			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
1788 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
1789 		else
1790 			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1791 						dcc->discard_granularity);
1792 
1793 		if (!atomic_read(&dcc->discard_cmd_cnt))
1794 			wait_ms = dpolicy.max_interval;
1795 
1796 		wait_event_interruptible_timeout(*q,
1797 				kthread_should_stop() || freezing(current) ||
1798 				dcc->discard_wake,
1799 				msecs_to_jiffies(wait_ms));
1800 
1801 		if (dcc->discard_wake)
1802 			dcc->discard_wake = 0;
1803 
1804 		/* clean up pending candidates before going to sleep */
1805 		if (atomic_read(&dcc->queued_discard))
1806 			__wait_all_discard_cmd(sbi, NULL);
1807 
1808 		if (try_to_freeze())
1809 			continue;
1810 		if (f2fs_readonly(sbi->sb))
1811 			continue;
1812 		if (kthread_should_stop())
1813 			return 0;
1814 		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1815 			wait_ms = dpolicy.max_interval;
1816 			continue;
1817 		}
1818 		if (!atomic_read(&dcc->discard_cmd_cnt))
1819 			continue;
1820 
1821 		sb_start_intwrite(sbi->sb);
1822 
1823 		issued = __issue_discard_cmd(sbi, &dpolicy);
1824 		if (issued > 0) {
1825 			__wait_all_discard_cmd(sbi, &dpolicy);
1826 			wait_ms = dpolicy.min_interval;
1827 		} else if (issued == -1) {
1828 			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1829 			if (!wait_ms)
1830 				wait_ms = dpolicy.mid_interval;
1831 		} else {
1832 			wait_ms = dpolicy.max_interval;
1833 		}
1834 
1835 		sb_end_intwrite(sbi->sb);
1836 
1837 	} while (!kthread_should_stop());
1838 	return 0;
1839 }
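/*
 * The discard thread above adapts its sleep interval: after issuing
 * discards successfully it retries after min_interval; if it backed off
 * because the device was busy (issued == -1) it waits for the remaining
 * DISCARD_TIME, falling back to mid_interval; otherwise it sleeps for
 * max_interval.
 */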
1840 
1841 #ifdef CONFIG_BLK_DEV_ZONED
1842 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1843 		struct block_device *bdev, block_t blkstart, block_t blklen)
1844 {
1845 	sector_t sector, nr_sects;
1846 	block_t lblkstart = blkstart;
1847 	int devi = 0;
1848 
1849 	if (f2fs_is_multi_device(sbi)) {
1850 		devi = f2fs_target_device_index(sbi, blkstart);
1851 		if (blkstart < FDEV(devi).start_blk ||
1852 		    blkstart > FDEV(devi).end_blk) {
1853 			f2fs_err(sbi, "Invalid block %x", blkstart);
1854 			return -EIO;
1855 		}
1856 		blkstart -= FDEV(devi).start_blk;
1857 	}
1858 
1859 	/* For sequential zones, reset the zone write pointer */
1860 	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1861 		sector = SECTOR_FROM_BLOCK(blkstart);
1862 		nr_sects = SECTOR_FROM_BLOCK(blklen);
1863 
1864 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
1865 				nr_sects != bdev_zone_sectors(bdev)) {
1866 			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1867 				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1868 				 blkstart, blklen);
1869 			return -EIO;
1870 		}
1871 		trace_f2fs_issue_reset_zone(bdev, blkstart);
1872 		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1873 					sector, nr_sects, GFP_NOFS);
1874 	}
1875 
1876 	/* For conventional zones, use regular discard if supported */
1877 	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1878 }
1879 #endif
1880 
1881 static int __issue_discard_async(struct f2fs_sb_info *sbi,
1882 		struct block_device *bdev, block_t blkstart, block_t blklen)
1883 {
1884 #ifdef CONFIG_BLK_DEV_ZONED
1885 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
1886 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1887 #endif
1888 	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
1889 }
1890 
1891 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
1892 				block_t blkstart, block_t blklen)
1893 {
1894 	sector_t start = blkstart, len = 0;
1895 	struct block_device *bdev;
1896 	struct seg_entry *se;
1897 	unsigned int offset;
1898 	block_t i;
1899 	int err = 0;
1900 
1901 	bdev = f2fs_target_device(sbi, blkstart, NULL);
1902 
1903 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
1904 		if (i != start) {
1905 			struct block_device *bdev2 =
1906 				f2fs_target_device(sbi, i, NULL);
1907 
1908 			if (bdev2 != bdev) {
1909 				err = __issue_discard_async(sbi, bdev,
1910 						start, len);
1911 				if (err)
1912 					return err;
1913 				bdev = bdev2;
1914 				start = i;
1915 				len = 0;
1916 			}
1917 		}
1918 
1919 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
1920 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
1921 
1922 		if (f2fs_block_unit_discard(sbi) &&
1923 				!f2fs_test_and_set_bit(offset, se->discard_map))
1924 			sbi->discard_blks--;
1925 	}
1926 
1927 	if (len)
1928 		err = __issue_discard_async(sbi, bdev, start, len);
1929 	return err;
1930 }
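/*
 * Illustrative multi-device case (hypothetical layout): if blocks
 * 1000..1099 span a device boundary at 1050, the loop above flushes
 * 1000..1049 to the first bdev as soon as f2fs_target_device() reports
 * a different device for block 1050, then restarts accumulation so that
 * 1050..1099 goes to the second bdev.
 */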
1931 
1932 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1933 							bool check_only)
1934 {
1935 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1936 	int max_blocks = sbi->blocks_per_seg;
1937 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
1938 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1939 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1940 	unsigned long *discard_map = (unsigned long *)se->discard_map;
1941 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
1942 	unsigned int start = 0, end = -1;
1943 	bool force = (cpc->reason & CP_DISCARD);
1944 	struct discard_entry *de = NULL;
1945 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
1946 	int i;
1947 
1948 	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi) ||
1949 			!f2fs_block_unit_discard(sbi))
1950 		return false;
1951 
1952 	if (!force) {
1953 		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
1954 			SM_I(sbi)->dcc_info->nr_discards >=
1955 				SM_I(sbi)->dcc_info->max_discards)
1956 			return false;
1957 	}
1958 
1959 	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
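	/*
	 * Candidate bits: under CP_DISCARD (force), pick blocks that are
	 * invalid in the checkpointed map and not yet in the discard map;
	 * otherwise pick blocks that were valid at the last checkpoint
	 * (ckpt_map bit set) but have been freed since (cur_map bit clear).
	 */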
1960 	for (i = 0; i < entries; i++)
1961 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
1962 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
1963 
1964 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
1965 				SM_I(sbi)->dcc_info->max_discards) {
1966 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1967 		if (start >= max_blocks)
1968 			break;
1969 
1970 		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
1971 		if (force && start && end != max_blocks
1972 					&& (end - start) < cpc->trim_minlen)
1973 			continue;
1974 
1975 		if (check_only)
1976 			return true;
1977 
1978 		if (!de) {
1979 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
1980 						GFP_F2FS_ZERO, true, NULL);
1981 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1982 			list_add_tail(&de->list, head);
1983 		}
1984 
1985 		for (i = start; i < end; i++)
1986 			__set_bit_le(i, (void *)de->discard_map);
1987 
1988 		SM_I(sbi)->dcc_info->nr_discards += end - start;
1989 	}
1990 	return false;
1991 }
1992 
1993 static void release_discard_addr(struct discard_entry *entry)
1994 {
1995 	list_del(&entry->list);
1996 	kmem_cache_free(discard_entry_slab, entry);
1997 }
1998 
1999 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2000 {
2001 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2002 	struct discard_entry *entry, *this;
2003 
2004 	/* drop caches */
2005 	list_for_each_entry_safe(entry, this, head, list)
2006 		release_discard_addr(entry);
2007 }
2008 
2009 /*
2010  * The caller should call f2fs_clear_prefree_segments() after the checkpoint is done.
2011  */
2012 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2013 {
2014 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2015 	unsigned int segno;
2016 
2017 	mutex_lock(&dirty_i->seglist_lock);
2018 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2019 		__set_test_and_free(sbi, segno, false);
2020 	mutex_unlock(&dirty_i->seglist_lock);
2021 }
2022 
2023 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2024 						struct cp_control *cpc)
2025 {
2026 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2027 	struct list_head *head = &dcc->entry_list;
2028 	struct discard_entry *entry, *this;
2029 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2030 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2031 	unsigned int start = 0, end = -1;
2032 	unsigned int secno, start_segno;
2033 	bool force = (cpc->reason & CP_DISCARD);
2034 	bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2035 						DISCARD_UNIT_SECTION;
2036 
2037 	if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2038 		section_alignment = true;
2039 
2040 	mutex_lock(&dirty_i->seglist_lock);
2041 
2042 	while (1) {
2043 		int i;
2044 
2045 		if (section_alignment && end != -1)
2046 			end--;
2047 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2048 		if (start >= MAIN_SEGS(sbi))
2049 			break;
2050 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2051 								start + 1);
2052 
2053 		if (section_alignment) {
2054 			start = rounddown(start, sbi->segs_per_sec);
2055 			end = roundup(end, sbi->segs_per_sec);
2056 		}
2057 
2058 		for (i = start; i < end; i++) {
2059 			if (test_and_clear_bit(i, prefree_map))
2060 				dirty_i->nr_dirty[PRE]--;
2061 		}
2062 
2063 		if (!f2fs_realtime_discard_enable(sbi))
2064 			continue;
2065 
2066 		if (force && start >= cpc->trim_start &&
2067 					(end - 1) <= cpc->trim_end)
2068 				continue;
2069 
2070 		if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
2071 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2072 				(end - start) << sbi->log_blocks_per_seg);
2073 			continue;
2074 		}
2075 next:
2076 		secno = GET_SEC_FROM_SEG(sbi, start);
2077 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
2078 		if (!IS_CURSEC(sbi, secno) &&
2079 			!get_valid_blocks(sbi, start, true))
2080 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2081 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
2082 
2083 		start = start_segno + sbi->segs_per_sec;
2084 		if (start < end)
2085 			goto next;
2086 		else
2087 			end = start - 1;
2088 	}
2089 	mutex_unlock(&dirty_i->seglist_lock);
2090 
2091 	if (!f2fs_block_unit_discard(sbi))
2092 		goto wakeup;
2093 
2094 	/* send small discards */
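	/*
	 * Each saved entry carries a per-segment bitmap; alternate between
	 * runs of discardable and non-discardable blocks, issuing every
	 * discardable run (subject to trim_minlen under CP_DISCARD, and
	 * skipped entirely on zoned devices).
	 */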
2095 	list_for_each_entry_safe(entry, this, head, list) {
2096 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2097 		bool is_valid = test_bit_le(0, entry->discard_map);
2098 
2099 find_next:
2100 		if (is_valid) {
2101 			next_pos = find_next_zero_bit_le(entry->discard_map,
2102 					sbi->blocks_per_seg, cur_pos);
2103 			len = next_pos - cur_pos;
2104 
2105 			if (f2fs_sb_has_blkzoned(sbi) ||
2106 			    (force && len < cpc->trim_minlen))
2107 				goto skip;
2108 
2109 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2110 									len);
2111 			total_len += len;
2112 		} else {
2113 			next_pos = find_next_bit_le(entry->discard_map,
2114 					sbi->blocks_per_seg, cur_pos);
2115 		}
2116 skip:
2117 		cur_pos = next_pos;
2118 		is_valid = !is_valid;
2119 
2120 		if (cur_pos < sbi->blocks_per_seg)
2121 			goto find_next;
2122 
2123 		release_discard_addr(entry);
2124 		dcc->nr_discards -= total_len;
2125 	}
2126 
2127 wakeup:
2128 	wake_up_discard_thread(sbi, false);
2129 }
2130 
2131 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2132 {
2133 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2134 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2135 	int err = 0;
2136 
2137 	if (!f2fs_realtime_discard_enable(sbi))
2138 		return 0;
2139 
2140 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2141 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2142 	if (IS_ERR(dcc->f2fs_issue_discard))
2143 		err = PTR_ERR(dcc->f2fs_issue_discard);
2144 
2145 	return err;
2146 }
2147 
2148 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2149 {
2150 	struct discard_cmd_control *dcc;
2151 	int err = 0, i;
2152 
2153 	if (SM_I(sbi)->dcc_info) {
2154 		dcc = SM_I(sbi)->dcc_info;
2155 		goto init_thread;
2156 	}
2157 
2158 	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2159 	if (!dcc)
2160 		return -ENOMEM;
2161 
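	/*
	 * Granularity tracks the configured discard unit: the built-in
	 * default for block-unit discard, or one segment / one section when
	 * mounted with discard_unit=segment or discard_unit=section.
	 */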
2162 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2163 	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2164 		dcc->discard_granularity = sbi->blocks_per_seg;
2165 	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2166 		dcc->discard_granularity = BLKS_PER_SEC(sbi);
2167 
2168 	INIT_LIST_HEAD(&dcc->entry_list);
2169 	for (i = 0; i < MAX_PLIST_NUM; i++)
2170 		INIT_LIST_HEAD(&dcc->pend_list[i]);
2171 	INIT_LIST_HEAD(&dcc->wait_list);
2172 	INIT_LIST_HEAD(&dcc->fstrim_list);
2173 	mutex_init(&dcc->cmd_lock);
2174 	atomic_set(&dcc->issued_discard, 0);
2175 	atomic_set(&dcc->queued_discard, 0);
2176 	atomic_set(&dcc->discard_cmd_cnt, 0);
2177 	dcc->nr_discards = 0;
2178 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2179 	dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
2180 	dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
2181 	dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
2182 	dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
2183 	dcc->undiscard_blks = 0;
2184 	dcc->next_pos = 0;
2185 	dcc->root = RB_ROOT_CACHED;
2186 	dcc->rbtree_check = false;
2187 
2188 	init_waitqueue_head(&dcc->discard_wait_queue);
2189 	SM_I(sbi)->dcc_info = dcc;
2190 init_thread:
2191 	err = f2fs_start_discard_thread(sbi);
2192 	if (err) {
2193 		kfree(dcc);
2194 		SM_I(sbi)->dcc_info = NULL;
2195 	}
2196 
2197 	return err;
2198 }
2199 
2200 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2201 {
2202 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2203 
2204 	if (!dcc)
2205 		return;
2206 
2207 	f2fs_stop_discard_thread(sbi);
2208 
2209 	/*
2210 	 * Recovery can cache discard commands, so the error path of
2211 	 * fill_super() needs to give them a chance to be issued.
2212 	 */
2213 	if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
2214 		f2fs_issue_discard_timeout(sbi);
2215 
2216 	kfree(dcc);
2217 	SM_I(sbi)->dcc_info = NULL;
2218 }
2219 
2220 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2221 {
2222 	struct sit_info *sit_i = SIT_I(sbi);
2223 
2224 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2225 		sit_i->dirty_sentries++;
2226 		return false;
2227 	}
2228 
2229 	return true;
2230 }
2231 
2232 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2233 					unsigned int segno, int modified)
2234 {
2235 	struct seg_entry *se = get_seg_entry(sbi, segno);
2236 
2237 	se->type = type;
2238 	if (modified)
2239 		__mark_sit_entry_dirty(sbi, segno);
2240 }
2241 
2242 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2243 								block_t blkaddr)
2244 {
2245 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2246 
2247 	if (segno == NULL_SEGNO)
2248 		return 0;
2249 	return get_seg_entry(sbi, segno)->mtime;
2250 }
2251 
2252 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2253 						unsigned long long old_mtime)
2254 {
2255 	struct seg_entry *se;
2256 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2257 	unsigned long long ctime = get_mtime(sbi, false);
2258 	unsigned long long mtime = old_mtime ? old_mtime : ctime;
2259 
2260 	if (segno == NULL_SEGNO)
2261 		return;
2262 
2263 	se = get_seg_entry(sbi, segno);
2264 
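	/*
	 * Maintain a running average of block mtimes. Hypothetical numbers:
	 * an average of 100 over 3 valid blocks plus a new block at time 200
	 * becomes (100 * 3 + 200) / (3 + 1) = 125.
	 */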
2265 	if (!se->mtime)
2266 		se->mtime = mtime;
2267 	else
2268 		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2269 						se->valid_blocks + 1);
2270 
2271 	if (ctime > SIT_I(sbi)->max_mtime)
2272 		SIT_I(sbi)->max_mtime = ctime;
2273 }
2274 
2275 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2276 {
2277 	struct seg_entry *se;
2278 	unsigned int segno, offset;
2279 	long int new_vblocks;
2280 	bool exist;
2281 #ifdef CONFIG_F2FS_CHECK_FS
2282 	bool mir_exist;
2283 #endif
2284 
2285 	segno = GET_SEGNO(sbi, blkaddr);
2286 
2287 	se = get_seg_entry(sbi, segno);
2288 	new_vblocks = se->valid_blocks + del;
2289 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2290 
2291 	f2fs_bug_on(sbi, (new_vblocks < 0 ||
2292 			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2293 
2294 	se->valid_blocks = new_vblocks;
2295 
2296 	/* Update valid block bitmap */
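	/*
	 * With CONFIG_F2FS_CHECK_FS, a mirror copy of the bitmap is updated
	 * in lockstep; any divergence between the two copies indicates an
	 * inconsistency and trips f2fs_bug_on below.
	 */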
2297 	if (del > 0) {
2298 		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2299 #ifdef CONFIG_F2FS_CHECK_FS
2300 		mir_exist = f2fs_test_and_set_bit(offset,
2301 						se->cur_valid_map_mir);
2302 		if (unlikely(exist != mir_exist)) {
2303 			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2304 				 blkaddr, exist);
2305 			f2fs_bug_on(sbi, 1);
2306 		}
2307 #endif
2308 		if (unlikely(exist)) {
2309 			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2310 				 blkaddr);
2311 			f2fs_bug_on(sbi, 1);
2312 			se->valid_blocks--;
2313 			del = 0;
2314 		}
2315 
2316 		if (f2fs_block_unit_discard(sbi) &&
2317 				!f2fs_test_and_set_bit(offset, se->discard_map))
2318 			sbi->discard_blks--;
2319 
2320 		/*
2321 		 * SSR should never reuse a block which is checkpointed
2322 		 * or newly invalidated.
2323 		 */
2324 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2325 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2326 				se->ckpt_valid_blocks++;
2327 		}
2328 	} else {
2329 		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2330 #ifdef CONFIG_F2FS_CHECK_FS
2331 		mir_exist = f2fs_test_and_clear_bit(offset,
2332 						se->cur_valid_map_mir);
2333 		if (unlikely(exist != mir_exist)) {
2334 			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2335 				 blkaddr, exist);
2336 			f2fs_bug_on(sbi, 1);
2337 		}
2338 #endif
2339 		if (unlikely(!exist)) {
2340 			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2341 				 blkaddr);
2342 			f2fs_bug_on(sbi, 1);
2343 			se->valid_blocks++;
2344 			del = 0;
2345 		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2346 			/*
2347 			 * If checkpoints are off, we must not reuse data that
2348 			 * was used in the previous checkpoint. If it was used
2349 			 * before, we must track that to know how much space we
2350 			 * really have.
2351 			 */
2352 			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2353 				spin_lock(&sbi->stat_lock);
2354 				sbi->unusable_block_count++;
2355 				spin_unlock(&sbi->stat_lock);
2356 			}
2357 		}
2358 
2359 		if (f2fs_block_unit_discard(sbi) &&
2360 			f2fs_test_and_clear_bit(offset, se->discard_map))
2361 			sbi->discard_blks++;
2362 	}
2363 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2364 		se->ckpt_valid_blocks += del;
2365 
2366 	__mark_sit_entry_dirty(sbi, segno);
2367 
2368 	/* update total number of valid blocks to be written in ckpt area */
2369 	SIT_I(sbi)->written_valid_blocks += del;
2370 
2371 	if (__is_large_section(sbi))
2372 		get_sec_entry(sbi, segno)->valid_blocks += del;
2373 }
2374 
2375 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2376 {
2377 	unsigned int segno = GET_SEGNO(sbi, addr);
2378 	struct sit_info *sit_i = SIT_I(sbi);
2379 
2380 	f2fs_bug_on(sbi, addr == NULL_ADDR);
2381 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2382 		return;
2383 
2384 	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
2385 	f2fs_invalidate_compress_page(sbi, addr);
2386 
2387 	/* add it into sit main buffer */
2388 	down_write(&sit_i->sentry_lock);
2389 
2390 	update_segment_mtime(sbi, addr, 0);
2391 	update_sit_entry(sbi, addr, -1);
2392 
2393 	/* add it into dirty seglist */
2394 	locate_dirty_segment(sbi, segno);
2395 
2396 	up_write(&sit_i->sentry_lock);
2397 }
2398 
2399 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2400 {
2401 	struct sit_info *sit_i = SIT_I(sbi);
2402 	unsigned int segno, offset;
2403 	struct seg_entry *se;
2404 	bool is_cp = false;
2405 
2406 	if (!__is_valid_data_blkaddr(blkaddr))
2407 		return true;
2408 
2409 	down_read(&sit_i->sentry_lock);
2410 
2411 	segno = GET_SEGNO(sbi, blkaddr);
2412 	se = get_seg_entry(sbi, segno);
2413 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2414 
2415 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2416 		is_cp = true;
2417 
2418 	up_read(&sit_i->sentry_lock);
2419 
2420 	return is_cp;
2421 }
2422 
2423 /*
2424  * This function must be called with the curseg_mutex held.
2425  */
2426 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
2427 					struct f2fs_summary *sum)
2428 {
2429 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2430 	void *addr = curseg->sum_blk;
2431 
2432 	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
2433 	memcpy(addr, sum, sizeof(struct f2fs_summary));
2434 }
2435 
2436 /*
2437  * Calculate the number of current summary pages for writing
2438  */
2439 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2440 {
2441 	int valid_sum_count = 0;
2442 	int i, sum_in_page;
2443 
2444 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2445 		if (sbi->ckpt->alloc_type[i] == SSR)
2446 			valid_sum_count += sbi->blocks_per_seg;
2447 		else {
2448 			if (for_ra)
2449 				valid_sum_count += le16_to_cpu(
2450 					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2451 			else
2452 				valid_sum_count += curseg_blkoff(sbi, i);
2453 		}
2454 	}
2455 
2456 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
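	/*
	 * The first (compacted) page also carries the NAT and SIT journals,
	 * so it holds fewer summary entries than a continuation page, which
	 * only reserves footer space; hence the result is at most 3 pages.
	 */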
2457 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2458 	if (valid_sum_count <= sum_in_page)
2459 		return 1;
2460 	else if ((valid_sum_count - sum_in_page) <=
2461 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2462 		return 2;
2463 	return 3;
2464 }
2465 
2466 /*
2467  * Caller should put this summary page
2468  */
2469 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2470 {
2471 	if (unlikely(f2fs_cp_error(sbi)))
2472 		return ERR_PTR(-EIO);
2473 	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2474 }
2475 
2476 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2477 					void *src, block_t blk_addr)
2478 {
2479 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2480 
2481 	memcpy(page_address(page), src, PAGE_SIZE);
2482 	set_page_dirty(page);
2483 	f2fs_put_page(page, 1);
2484 }
2485 
2486 static void write_sum_page(struct f2fs_sb_info *sbi,
2487 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2488 {
2489 	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2490 }
2491 
2492 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2493 						int type, block_t blk_addr)
2494 {
2495 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2496 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2497 	struct f2fs_summary_block *src = curseg->sum_blk;
2498 	struct f2fs_summary_block *dst;
2499 
2500 	dst = (struct f2fs_summary_block *)page_address(page);
2501 	memset(dst, 0, PAGE_SIZE);
2502 
2503 	mutex_lock(&curseg->curseg_mutex);
2504 
2505 	down_read(&curseg->journal_rwsem);
2506 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2507 	up_read(&curseg->journal_rwsem);
2508 
2509 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2510 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2511 
2512 	mutex_unlock(&curseg->curseg_mutex);
2513 
2514 	set_page_dirty(page);
2515 	f2fs_put_page(page, 1);
2516 }
2517 
2518 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2519 				struct curseg_info *curseg, int type)
2520 {
2521 	unsigned int segno = curseg->segno + 1;
2522 	struct free_segmap_info *free_i = FREE_I(sbi);
2523 
2524 	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2525 		return !test_bit(segno, free_i->free_segmap);
2526 	return 0;
2527 }
2528 
2529 /*
2530  * Find a new segment in the free segment bitmap in the right order.
2531  * This function must succeed; otherwise it triggers a BUG.
2532  */
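/*
 * Search order (sketch): first try the next free segment inside the current
 * section; failing that, scan for a free section from the hint, moving right
 * (or left for ALLOC_LEFT when nothing is found to the right), and prefer a
 * zone not already occupied by another current segment.
 */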
2533 static void get_new_segment(struct f2fs_sb_info *sbi,
2534 			unsigned int *newseg, bool new_sec, int dir)
2535 {
2536 	struct free_segmap_info *free_i = FREE_I(sbi);
2537 	unsigned int segno, secno, zoneno;
2538 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2539 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2540 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2541 	unsigned int left_start = hint;
2542 	bool init = true;
2543 	int go_left = 0;
2544 	int i;
2545 
2546 	spin_lock(&free_i->segmap_lock);
2547 
2548 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2549 		segno = find_next_zero_bit(free_i->free_segmap,
2550 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2551 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2552 			goto got_it;
2553 	}
2554 find_other_zone:
2555 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2556 	if (secno >= MAIN_SECS(sbi)) {
2557 		if (dir == ALLOC_RIGHT) {
2558 			secno = find_first_zero_bit(free_i->free_secmap,
2559 							MAIN_SECS(sbi));
2560 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2561 		} else {
2562 			go_left = 1;
2563 			left_start = hint - 1;
2564 		}
2565 	}
2566 	if (go_left == 0)
2567 		goto skip_left;
2568 
2569 	while (test_bit(left_start, free_i->free_secmap)) {
2570 		if (left_start > 0) {
2571 			left_start--;
2572 			continue;
2573 		}
2574 		left_start = find_first_zero_bit(free_i->free_secmap,
2575 							MAIN_SECS(sbi));
2576 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2577 		break;
2578 	}
2579 	secno = left_start;
2580 skip_left:
2581 	segno = GET_SEG_FROM_SEC(sbi, secno);
2582 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2583 
2584 	/* give up on finding another zone */
2585 	if (!init)
2586 		goto got_it;
2587 	if (sbi->secs_per_zone == 1)
2588 		goto got_it;
2589 	if (zoneno == old_zoneno)
2590 		goto got_it;
2591 	if (dir == ALLOC_LEFT) {
2592 		if (!go_left && zoneno + 1 >= total_zones)
2593 			goto got_it;
2594 		if (go_left && zoneno == 0)
2595 			goto got_it;
2596 	}
2597 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2598 		if (CURSEG_I(sbi, i)->zone == zoneno)
2599 			break;
2600 
2601 	if (i < NR_CURSEG_TYPE) {
2602 		/* zone is in use, try another */
2603 		if (go_left)
2604 			hint = zoneno * sbi->secs_per_zone - 1;
2605 		else if (zoneno + 1 >= total_zones)
2606 			hint = 0;
2607 		else
2608 			hint = (zoneno + 1) * sbi->secs_per_zone;
2609 		init = false;
2610 		goto find_other_zone;
2611 	}
2612 got_it:
2613 	/* mark the chosen segment as in-use in the free segmap */
2614 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2615 	__set_inuse(sbi, segno);
2616 	*newseg = segno;
2617 	spin_unlock(&free_i->segmap_lock);
2618 }
2619 
2620 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2621 {
2622 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2623 	struct summary_footer *sum_footer;
2624 	unsigned short seg_type = curseg->seg_type;
2625 
2626 	curseg->inited = true;
2627 	curseg->segno = curseg->next_segno;
2628 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2629 	curseg->next_blkoff = 0;
2630 	curseg->next_segno = NULL_SEGNO;
2631 
2632 	sum_footer = &(curseg->sum_blk->footer);
2633 	memset(sum_footer, 0, sizeof(struct summary_footer));
2634 
2635 	sanity_check_seg_type(sbi, seg_type);
2636 
2637 	if (IS_DATASEG(seg_type))
2638 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2639 	if (IS_NODESEG(seg_type))
2640 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2641 	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2642 }
2643 
2644 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2645 {
2646 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2647 	unsigned short seg_type = curseg->seg_type;
2648 
2649 	sanity_check_seg_type(sbi, seg_type);
2650 	if (f2fs_need_rand_seg(sbi))
2651 		return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
2652 
2653 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
2654 	if (__is_large_section(sbi))
2655 		return curseg->segno;
2656 
2657 	/* the inmem log may not be located on any segment after mount */
2658 	if (!curseg->inited)
2659 		return 0;
2660 
2661 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2662 		return 0;
2663 
2664 	if (test_opt(sbi, NOHEAP) &&
2665 		(seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
2666 		return 0;
2667 
2668 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2669 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2670 
2671 	/* find segments from 0 to reuse freed segments */
2672 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2673 		return 0;
2674 
2675 	return curseg->segno;
2676 }
2677 
2678 /*
2679  * Allocate a current working segment.
2680  * This function always allocates a free segment in LFS manner.
2681  */
2682 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2683 {
2684 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2685 	unsigned short seg_type = curseg->seg_type;
2686 	unsigned int segno = curseg->segno;
2687 	int dir = ALLOC_LEFT;
2688 
2689 	if (curseg->inited)
2690 		write_sum_page(sbi, curseg->sum_blk,
2691 				GET_SUM_BLOCK(sbi, segno));
2692 	if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
2693 		dir = ALLOC_RIGHT;
2694 
2695 	if (test_opt(sbi, NOHEAP))
2696 		dir = ALLOC_RIGHT;
2697 
2698 	segno = __get_next_segno(sbi, type);
2699 	get_new_segment(sbi, &segno, new_sec, dir);
2700 	curseg->next_segno = segno;
2701 	reset_curseg(sbi, type, 1);
2702 	curseg->alloc_type = LFS;
2703 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2704 		curseg->fragment_remained_chunk =
2705 				prandom_u32() % sbi->max_fragment_chunk + 1;
2706 }
2707 
2708 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2709 					int segno, block_t start)
2710 {
2711 	struct seg_entry *se = get_seg_entry(sbi, segno);
2712 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2713 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2714 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2715 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2716 	int i;
2717 
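	/*
	 * A block may be reused by SSR only if it is free in both the current
	 * and the checkpointed bitmaps, so scan the OR of the two maps for
	 * the next zero bit.
	 */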
2718 	for (i = 0; i < entries; i++)
2719 		target_map[i] = ckpt_map[i] | cur_map[i];
2720 
2721 	return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2722 }
2723 
2724 /*
2725  * If a segment is written in LFS manner, the next block offset is simply
2726  * obtained by increasing the current block offset. However, if a segment is
2727  * written in SSR manner, the next block offset is obtained via __next_free_blkoff.
2728  */
2729 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2730 				struct curseg_info *seg)
2731 {
2732 	if (seg->alloc_type == SSR) {
2733 		seg->next_blkoff =
2734 			__next_free_blkoff(sbi, seg->segno,
2735 						seg->next_blkoff + 1);
2736 	} else {
2737 		seg->next_blkoff++;
2738 		if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) {
2739 			/* To allocate block chunks of different sizes, use a random number */
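			/*
			 * Hypothetical numbers: with max_fragment_chunk = 4
			 * and max_fragment_hole = 4, a run of 1..4 blocks is
			 * written, then 1..4 block offsets are skipped,
			 * deliberately fragmenting the log.
			 */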
2740 			if (--seg->fragment_remained_chunk <= 0) {
2741 				seg->fragment_remained_chunk =
2742 				   prandom_u32() % sbi->max_fragment_chunk + 1;
2743 				seg->next_blkoff +=
2744 				   prandom_u32() % sbi->max_fragment_hole + 1;
2745 			}
2746 		}
2747 	}
2748 }
2749 
2750 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2751 {
2752 	return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
2753 }
2754 
2755 /*
2756  * This function always allocates a used segment (from the dirty seglist) in
2757  * SSR manner, so it should recover the existing segment information of the valid blocks.
2758  */
2759 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
2760 {
2761 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2762 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2763 	unsigned int new_segno = curseg->next_segno;
2764 	struct f2fs_summary_block *sum_node;
2765 	struct page *sum_page;
2766 
2767 	if (flush)
2768 		write_sum_page(sbi, curseg->sum_blk,
2769 					GET_SUM_BLOCK(sbi, curseg->segno));
2770 
2771 	__set_test_and_inuse(sbi, new_segno);
2772 
2773 	mutex_lock(&dirty_i->seglist_lock);
2774 	__remove_dirty_segment(sbi, new_segno, PRE);
2775 	__remove_dirty_segment(sbi, new_segno, DIRTY);
2776 	mutex_unlock(&dirty_i->seglist_lock);
2777 
2778 	reset_curseg(sbi, type, 1);
2779 	curseg->alloc_type = SSR;
2780 	curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2781 
2782 	sum_page = f2fs_get_sum_page(sbi, new_segno);
2783 	if (IS_ERR(sum_page)) {
2784 		/* GC won't be able to use stale summary pages due to cp_error */
2785 		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2786 		return;
2787 	}
2788 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2789 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2790 	f2fs_put_page(sum_page, 1);
2791 }
2792 
2793 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2794 				int alloc_mode, unsigned long long age);
2795 
2796 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2797 					int target_type, int alloc_mode,
2798 					unsigned long long age)
2799 {
2800 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2801 
2802 	curseg->seg_type = target_type;
2803 
2804 	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2805 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2806 
2807 		curseg->seg_type = se->type;
2808 		change_curseg(sbi, type, true);
2809 	} else {
2810 		/* allocate cold segment by default */
2811 		curseg->seg_type = CURSEG_COLD_DATA;
2812 		new_curseg(sbi, type, true);
2813 	}
2814 	stat_inc_seg_type(sbi, curseg);
2815 }
2816 
2817 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2818 {
2819 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2820 
2821 	if (!sbi->am.atgc_enabled)
2822 		return;
2823 
2824 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2825 
2826 	mutex_lock(&curseg->curseg_mutex);
2827 	down_write(&SIT_I(sbi)->sentry_lock);
2828 
2829 	get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2830 
2831 	up_write(&SIT_I(sbi)->sentry_lock);
2832 	mutex_unlock(&curseg->curseg_mutex);
2833 
2834 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
2835 }
2836 
2837 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2838 {
2839 	__f2fs_init_atgc_curseg(sbi);
2840 }
2841 
2842 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2843 {
2844 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2845 
2846 	mutex_lock(&curseg->curseg_mutex);
2847 	if (!curseg->inited)
2848 		goto out;
2849 
2850 	if (get_valid_blocks(sbi, curseg->segno, false)) {
2851 		write_sum_page(sbi, curseg->sum_blk,
2852 				GET_SUM_BLOCK(sbi, curseg->segno));
2853 	} else {
2854 		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2855 		__set_test_and_free(sbi, curseg->segno, true);
2856 		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2857 	}
2858 out:
2859 	mutex_unlock(&curseg->curseg_mutex);
2860 }
2861 
2862 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2863 {
2864 	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2865 
2866 	if (sbi->am.atgc_enabled)
2867 		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2868 }
2869 
2870 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2871 {
2872 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2873 
2874 	mutex_lock(&curseg->curseg_mutex);
2875 	if (!curseg->inited)
2876 		goto out;
2877 	if (get_valid_blocks(sbi, curseg->segno, false))
2878 		goto out;
2879 
2880 	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2881 	__set_test_and_inuse(sbi, curseg->segno);
2882 	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2883 out:
2884 	mutex_unlock(&curseg->curseg_mutex);
2885 }
2886 
2887 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2888 {
2889 	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2890 
2891 	if (sbi->am.atgc_enabled)
2892 		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2893 }
2894 
2895 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2896 				int alloc_mode, unsigned long long age)
2897 {
2898 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2899 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2900 	unsigned segno = NULL_SEGNO;
2901 	unsigned short seg_type = curseg->seg_type;
2902 	int i, cnt;
2903 	bool reversed = false;
2904 
2905 	sanity_check_seg_type(sbi, seg_type);
2906 
2907 	/* f2fs_need_SSR() already forces us to do this */
2908 	if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
2909 		curseg->next_segno = segno;
2910 		return 1;
2911 	}
2912 
2913 	/* For node segments, let's do SSR more intensively */
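	/*
	 * Fall back across temperatures: e.g. for CURSEG_WARM_NODE the scan
	 * runs cold -> warm -> hot (reversed), skipping the original type,
	 * so every node log is tried before giving up.
	 */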
2914 	if (IS_NODESEG(seg_type)) {
2915 		if (seg_type >= CURSEG_WARM_NODE) {
2916 			reversed = true;
2917 			i = CURSEG_COLD_NODE;
2918 		} else {
2919 			i = CURSEG_HOT_NODE;
2920 		}
2921 		cnt = NR_CURSEG_NODE_TYPE;
2922 	} else {
2923 		if (seg_type >= CURSEG_WARM_DATA) {
2924 			reversed = true;
2925 			i = CURSEG_COLD_DATA;
2926 		} else {
2927 			i = CURSEG_HOT_DATA;
2928 		}
2929 		cnt = NR_CURSEG_DATA_TYPE;
2930 	}
2931 
2932 	for (; cnt-- > 0; reversed ? i-- : i++) {
2933 		if (i == seg_type)
2934 			continue;
2935 		if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
2936 			curseg->next_segno = segno;
2937 			return 1;
2938 		}
2939 	}
2940 
2941 	/* find valid_blocks=0 in dirty list */
2942 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2943 		segno = get_free_segment(sbi);
2944 		if (segno != NULL_SEGNO) {
2945 			curseg->next_segno = segno;
2946 			return 1;
2947 		}
2948 	}
2949 	return 0;
2950 }
2951 
2952 /*
2953  * Flush out the current segment and replace it with a new one.
2954  * This function must succeed; otherwise it triggers a BUG.
2955  */
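/*
 * Policy order: a forced fresh segment; a fresh segment for warm node logs
 * when CRC recovery is off; the adjacent free segment for LFS allocation;
 * an SSR victim when free space is tight; and a new segment as the fallback.
 */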
2956 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2957 						int type, bool force)
2958 {
2959 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2960 
2961 	if (force)
2962 		new_curseg(sbi, type, true);
2963 	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2964 					curseg->seg_type == CURSEG_WARM_NODE)
2965 		new_curseg(sbi, type, false);
2966 	else if (curseg->alloc_type == LFS &&
2967 			is_next_segment_free(sbi, curseg, type) &&
2968 			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2969 		new_curseg(sbi, type, false);
2970 	else if (f2fs_need_SSR(sbi) &&
2971 			get_ssr_segment(sbi, type, SSR, 0))
2972 		change_curseg(sbi, type, true);
2973 	else
2974 		new_curseg(sbi, type, false);
2975 
2976 	stat_inc_seg_type(sbi, curseg);
2977 }
2978 
2979 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
2980 					unsigned int start, unsigned int end)
2981 {
2982 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2983 	unsigned int segno;
2984 
2985 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2986 	mutex_lock(&curseg->curseg_mutex);
2987 	down_write(&SIT_I(sbi)->sentry_lock);
2988 
2989 	segno = CURSEG_I(sbi, type)->segno;
2990 	if (segno < start || segno > end)
2991 		goto unlock;
2992 
2993 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
2994 		change_curseg(sbi, type, true);
2995 	else
2996 		new_curseg(sbi, type, true);
2997 
2998 	stat_inc_seg_type(sbi, curseg);
2999 
3000 	locate_dirty_segment(sbi, segno);
3001 unlock:
3002 	up_write(&SIT_I(sbi)->sentry_lock);
3003 
3004 	if (segno != curseg->segno)
3005 		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3006 			    type, segno, curseg->segno);
3007 
3008 	mutex_unlock(&curseg->curseg_mutex);
3009 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3010 }
3011 
3012 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3013 						bool new_sec, bool force)
3014 {
3015 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3016 	unsigned int old_segno;
3017 
3018 	if (!curseg->inited)
3019 		goto alloc;
3020 
3021 	if (force || curseg->next_blkoff ||
3022 		get_valid_blocks(sbi, curseg->segno, new_sec))
3023 		goto alloc;
3024 
3025 	if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3026 		return;
3027 alloc:
3028 	old_segno = curseg->segno;
3029 	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
3030 	locate_dirty_segment(sbi, old_segno);
3031 }
3032 
3033 static void __allocate_new_section(struct f2fs_sb_info *sbi,
3034 						int type, bool force)
3035 {
3036 	__allocate_new_segment(sbi, type, true, force);
3037 }
3038 
3039 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3040 {
3041 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3042 	down_write(&SIT_I(sbi)->sentry_lock);
3043 	__allocate_new_section(sbi, type, force);
3044 	up_write(&SIT_I(sbi)->sentry_lock);
3045 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3046 }
3047 
3048 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3049 {
3050 	int i;
3051 
3052 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3053 	down_write(&SIT_I(sbi)->sentry_lock);
3054 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
3055 		__allocate_new_segment(sbi, i, false, false);
3056 	up_write(&SIT_I(sbi)->sentry_lock);
3057 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3058 }
3059 
3060 static const struct segment_allocation default_salloc_ops = {
3061 	.allocate_segment = allocate_segment_by_default,
3062 };
3063 
3064 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3065 						struct cp_control *cpc)
3066 {
3067 	__u64 trim_start = cpc->trim_start;
3068 	bool has_candidate = false;
3069 
3070 	down_write(&SIT_I(sbi)->sentry_lock);
3071 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
3072 		if (add_discard_addrs(sbi, cpc, true)) {
3073 			has_candidate = true;
3074 			break;
3075 		}
3076 	}
3077 	up_write(&SIT_I(sbi)->sentry_lock);
3078 
3079 	cpc->trim_start = trim_start;
3080 	return has_candidate;
3081 }
3082 
3083 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3084 					struct discard_policy *dpolicy,
3085 					unsigned int start, unsigned int end)
3086 {
3087 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3088 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
3089 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
3090 	struct discard_cmd *dc;
3091 	struct blk_plug plug;
3092 	int issued;
3093 	unsigned int trimmed = 0;
3094 
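	/*
	 * Issue in batches: once max_requests commands are in flight, drop
	 * cmd_lock, wait for outstanding discards, then resume the rb-tree
	 * scan from the next logical start address.
	 */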
3095 next:
3096 	issued = 0;
3097 
3098 	mutex_lock(&dcc->cmd_lock);
3099 	if (unlikely(dcc->rbtree_check))
3100 		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
3101 							&dcc->root, false));
3102 
3103 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
3104 					NULL, start,
3105 					(struct rb_entry **)&prev_dc,
3106 					(struct rb_entry **)&next_dc,
3107 					&insert_p, &insert_parent, true, NULL);
3108 	if (!dc)
3109 		dc = next_dc;
3110 
3111 	blk_start_plug(&plug);
3112 
3113 	while (dc && dc->lstart <= end) {
3114 		struct rb_node *node;
3115 		int err = 0;
3116 
3117 		if (dc->len < dpolicy->granularity)
3118 			goto skip;
3119 
3120 		if (dc->state != D_PREP) {
3121 			list_move_tail(&dc->list, &dcc->fstrim_list);
3122 			goto skip;
3123 		}
3124 
3125 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3126 
3127 		if (issued >= dpolicy->max_requests) {
3128 			start = dc->lstart + dc->len;
3129 
3130 			if (err)
3131 				__remove_discard_cmd(sbi, dc);
3132 
3133 			blk_finish_plug(&plug);
3134 			mutex_unlock(&dcc->cmd_lock);
3135 			trimmed += __wait_all_discard_cmd(sbi, NULL);
3136 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
3137 			goto next;
3138 		}
3139 skip:
3140 		node = rb_next(&dc->rb_node);
3141 		if (err)
3142 			__remove_discard_cmd(sbi, dc);
3143 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3144 
3145 		if (fatal_signal_pending(current))
3146 			break;
3147 	}
3148 
3149 	blk_finish_plug(&plug);
3150 	mutex_unlock(&dcc->cmd_lock);
3151 
3152 	return trimmed;
3153 }
3154 
3155 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3156 {
3157 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
3158 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3159 	unsigned int start_segno, end_segno;
3160 	block_t start_block, end_block;
3161 	struct cp_control cpc;
3162 	struct discard_policy dpolicy;
3163 	unsigned long long trimmed = 0;
3164 	int err = 0;
3165 	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3166 
3167 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3168 		return -EINVAL;
3169 
3170 	if (end < MAIN_BLKADDR(sbi))
3171 		goto out;
3172 
3173 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3174 		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3175 		return -EFSCORRUPTED;
3176 	}
3177 
3178 	/* start/end segment number in main_area */
3179 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3180 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3181 						GET_SEGNO(sbi, end);
3182 	if (need_align) {
3183 		start_segno = rounddown(start_segno, sbi->segs_per_sec);
3184 		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
3185 	}
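	/*
	 * Hypothetical numbers: with 4 segs_per_sec, trimming segments 5..9
	 * in LFS large-section mode widens the range to segments 4..11 so
	 * that whole sections are covered.
	 */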
3186 
3187 	cpc.reason = CP_DISCARD;
3188 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3189 	cpc.trim_start = start_segno;
3190 	cpc.trim_end = end_segno;
3191 
3192 	if (sbi->discard_blks == 0)
3193 		goto out;
3194 
3195 	f2fs_down_write(&sbi->gc_lock);
3196 	err = f2fs_write_checkpoint(sbi, &cpc);
3197 	f2fs_up_write(&sbi->gc_lock);
3198 	if (err)
3199 		goto out;
3200 
3201 	/*
3202 	 * We filed discard candidates, but we don't actually need to wait for
3203 	 * all of them, since they'll be issued at idle time when the runtime
3204 	 * discard option is enabled. The user configuration appears to rely
3205 	 * on runtime discard or periodic fstrim instead.
3206 	 */
3207 	if (f2fs_realtime_discard_enable(sbi))
3208 		goto out;
3209 
3210 	start_block = START_BLOCK(sbi, start_segno);
3211 	end_block = START_BLOCK(sbi, end_segno + 1);
3212 
3213 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3214 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3215 					start_block, end_block);
3216 
3217 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3218 					start_block, end_block);
3219 out:
3220 	if (!err)
3221 		range->len = F2FS_BLK_TO_BYTES(trimmed);
3222 	return err;
3223 }
3224 
3225 static bool __has_curseg_space(struct f2fs_sb_info *sbi,
3226 					struct curseg_info *curseg)
3227 {
3228 	return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
3229 							curseg->segno);
3230 }
3231 
3232 int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3233 {
3234 	switch (hint) {
3235 	case WRITE_LIFE_SHORT:
3236 		return CURSEG_HOT_DATA;
3237 	case WRITE_LIFE_EXTREME:
3238 		return CURSEG_COLD_DATA;
3239 	default:
3240 		return CURSEG_WARM_DATA;
3241 	}
3242 }
3243 
3244 static int __get_segment_type_2(struct f2fs_io_info *fio)
3245 {
3246 	if (fio->type == DATA)
3247 		return CURSEG_HOT_DATA;
3248 	else
3249 		return CURSEG_HOT_NODE;
3250 }
3251 
3252 static int __get_segment_type_4(struct f2fs_io_info *fio)
3253 {
3254 	if (fio->type == DATA) {
3255 		struct inode *inode = fio->page->mapping->host;
3256 
3257 		if (S_ISDIR(inode->i_mode))
3258 			return CURSEG_HOT_DATA;
3259 		else
3260 			return CURSEG_COLD_DATA;
3261 	} else {
3262 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3263 			return CURSEG_WARM_NODE;
3264 		else
3265 			return CURSEG_COLD_NODE;
3266 	}
3267 }
3268 
3269 static int __get_segment_type_6(struct f2fs_io_info *fio)
3270 {
3271 	if (fio->type == DATA) {
3272 		struct inode *inode = fio->page->mapping->host;
3273 
3274 		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3275 			return CURSEG_COLD_DATA_PINNED;
3276 
3277 		if (page_private_gcing(fio->page)) {
3278 			if (fio->sbi->am.atgc_enabled &&
3279 				(fio->io_type == FS_DATA_IO) &&
3280 				(fio->sbi->gc_mode != GC_URGENT_HIGH))
3281 				return CURSEG_ALL_DATA_ATGC;
3282 			else
3283 				return CURSEG_COLD_DATA;
3284 		}
3285 		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3286 			return CURSEG_COLD_DATA;
3287 		if (file_is_hot(inode) ||
3288 				is_inode_flag_set(inode, FI_HOT_DATA) ||
3289 				f2fs_is_atomic_file(inode) ||
3290 				f2fs_is_volatile_file(inode))
3291 			return CURSEG_HOT_DATA;
3292 		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3293 	} else {
3294 		if (IS_DNODE(fio->page))
3295 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3296 						CURSEG_HOT_NODE;
3297 		return CURSEG_COLD_NODE;
3298 	}
3299 }
3300 
3301 static int __get_segment_type(struct f2fs_io_info *fio)
3302 {
3303 	int type = 0;
3304 
3305 	switch (F2FS_OPTION(fio->sbi).active_logs) {
3306 	case 2:
3307 		type = __get_segment_type_2(fio);
3308 		break;
3309 	case 4:
3310 		type = __get_segment_type_4(fio);
3311 		break;
3312 	case 6:
3313 		type = __get_segment_type_6(fio);
3314 		break;
3315 	default:
3316 		f2fs_bug_on(fio->sbi, true);
3317 	}
3318 
3319 	if (IS_HOT(type))
3320 		fio->temp = HOT;
3321 	else if (IS_WARM(type))
3322 		fio->temp = WARM;
3323 	else
3324 		fio->temp = COLD;
3325 	return type;
3326 }
3327 
3328 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3329 		block_t old_blkaddr, block_t *new_blkaddr,
3330 		struct f2fs_summary *sum, int type,
3331 		struct f2fs_io_info *fio)
3332 {
3333 	struct sit_info *sit_i = SIT_I(sbi);
3334 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3335 	unsigned long long old_mtime;
3336 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3337 	struct seg_entry *se = NULL;
3338 
3339 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
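	/* Lock order here: curseg_lock (read) -> curseg_mutex -> sentry_lock */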
3340 
3341 	mutex_lock(&curseg->curseg_mutex);
3342 	down_write(&sit_i->sentry_lock);
3343 
3344 	if (from_gc) {
3345 		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3346 		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3347 		sanity_check_seg_type(sbi, se->type);
3348 		f2fs_bug_on(sbi, IS_NODESEG(se->type));
3349 	}
3350 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3351 
3352 	f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
3353 
3354 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3355 
3356 	/*
3357 	 * __add_sum_entry must be called with the curseg_mutex held
3358 	 * because this function updates a summary entry in the
3359 	 * current summary block.
3360 	 */
3361 	__add_sum_entry(sbi, type, sum);
3362 
3363 	__refresh_next_blkoff(sbi, curseg);
3364 
3365 	stat_inc_block_count(sbi, curseg);
3366 
3367 	if (from_gc) {
3368 		old_mtime = get_segment_mtime(sbi, old_blkaddr);
3369 	} else {
3370 		update_segment_mtime(sbi, old_blkaddr, 0);
3371 		old_mtime = 0;
3372 	}
3373 	update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3374 
3375 	/*
3376 	 * SIT information should be updated before segment allocation,
3377 	 * since SSR needs latest valid block information.
3378 	 */
3379 	update_sit_entry(sbi, *new_blkaddr, 1);
3380 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3381 		update_sit_entry(sbi, old_blkaddr, -1);
3382 
3383 	if (!__has_curseg_space(sbi, curseg)) {
3384 		if (from_gc)
3385 			get_atssr_segment(sbi, type, se->type,
3386 						AT_SSR, se->mtime);
3387 		else
3388 			sit_i->s_ops->allocate_segment(sbi, type, false);
3389 	}
3390 	/*
3391 	 * segment dirty status should be updated after segment allocation,
3392 	 * so we just need to update the status once, after the previous
3393 	 * segment is closed.
3394 	 */
3395 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3396 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3397 
3398 	up_write(&sit_i->sentry_lock);
3399 
3400 	if (page && IS_NODESEG(type)) {
3401 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3402 
3403 		f2fs_inode_chksum_set(sbi, page);
3404 	}
3405 
3406 	if (fio) {
3407 		struct f2fs_bio_info *io;
3408 
3409 		if (F2FS_IO_ALIGNED(sbi))
3410 			fio->retry = false;
3411 
3412 		INIT_LIST_HEAD(&fio->list);
3413 		fio->in_list = true;
3414 		io = sbi->write_io[fio->type] + fio->temp;
3415 		spin_lock(&io->io_lock);
3416 		list_add_tail(&fio->list, &io->io_list);
3417 		spin_unlock(&io->io_lock);
3418 	}
3419 
3420 	mutex_unlock(&curseg->curseg_mutex);
3421 
3422 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3423 }
3424 
3425 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3426 					block_t blkaddr, unsigned int blkcnt)
3427 {
3428 	if (!f2fs_is_multi_device(sbi))
3429 		return;
3430 
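	/*
	 * The range [blkaddr, blkaddr + blkcnt) may span several devices;
	 * mark each device dirty in turn, advancing by however many blocks
	 * fit on the current one.
	 */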
3431 	while (1) {
3432 		unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3433 		unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3434 
3435 		/* update device state for fsync */
3436 		f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3437 
3438 		/* update device state for checkpoint */
3439 		if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3440 			spin_lock(&sbi->dev_lock);
3441 			f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3442 			spin_unlock(&sbi->dev_lock);
3443 		}
3444 
3445 		if (blkcnt <= blks)
3446 			break;
3447 		blkcnt -= blks;
3448 		blkaddr += blks;
3449 	}
3450 }
3451 
3452 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3453 {
3454 	int type = __get_segment_type(fio);
3455 	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3456 
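	/*
	 * Cold data in LFS mode must keep its allocation order, so writers
	 * serialize on io_order_lock; if submission sets fio->retry, a new
	 * block is allocated and the write is tried again.
	 */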
3457 	if (keep_order)
3458 		f2fs_down_read(&fio->sbi->io_order_lock);
3459 reallocate:
3460 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3461 			&fio->new_blkaddr, sum, type, fio);
3462 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
3463 		invalidate_mapping_pages(META_MAPPING(fio->sbi),
3464 					fio->old_blkaddr, fio->old_blkaddr);
3465 		f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
3466 	}
3467 
3468 	/* writeout dirty page into bdev */
3469 	f2fs_submit_page_write(fio);
3470 	if (fio->retry) {
3471 		fio->old_blkaddr = fio->new_blkaddr;
3472 		goto reallocate;
3473 	}
3474 
3475 	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3476 
3477 	if (keep_order)
3478 		f2fs_up_read(&fio->sbi->io_order_lock);
3479 }
3480 
3481 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3482 					enum iostat_type io_type)
3483 {
3484 	struct f2fs_io_info fio = {
3485 		.sbi = sbi,
3486 		.type = META,
3487 		.temp = HOT,
3488 		.op = REQ_OP_WRITE,
3489 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3490 		.old_blkaddr = page->index,
3491 		.new_blkaddr = page->index,
3492 		.page = page,
3493 		.encrypted_page = NULL,
3494 		.in_list = false,
3495 	};
3496 
3497 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3498 		fio.op_flags &= ~REQ_META;
3499 
3500 	set_page_writeback(page);
3501 	ClearPageError(page);
3502 	f2fs_submit_page_write(&fio);
3503 
3504 	stat_inc_meta_count(sbi, page->index);
3505 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
3506 }
3507 
3508 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3509 {
3510 	struct f2fs_summary sum;
3511 
3512 	set_summary(&sum, nid, 0, 0);
3513 	do_write_page(&sum, fio);
3514 
3515 	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3516 }
3517 
3518 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3519 					struct f2fs_io_info *fio)
3520 {
3521 	struct f2fs_sb_info *sbi = fio->sbi;
3522 	struct f2fs_summary sum;
3523 
3524 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3525 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3526 	do_write_page(&sum, fio);
3527 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3528 
3529 	f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
3530 }
3531 
3532 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3533 {
3534 	int err;
3535 	struct f2fs_sb_info *sbi = fio->sbi;
3536 	unsigned int segno;
3537 
3538 	fio->new_blkaddr = fio->old_blkaddr;
3539 	/* i/o temperature is needed for passing down write hints */
3540 	__get_segment_type(fio);
3541 
3542 	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3543 
3544 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3545 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3546 		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3547 			  __func__, segno);
3548 		err = -EFSCORRUPTED;
3549 		goto drop_bio;
3550 	}
3551 
3552 	if (f2fs_cp_error(sbi)) {
3553 		err = -EIO;
3554 		goto drop_bio;
3555 	}
3556 
3557 	invalidate_mapping_pages(META_MAPPING(sbi),
3558 				fio->new_blkaddr, fio->new_blkaddr);
3559 
3560 	stat_inc_inplace_blocks(fio->sbi);
3561 
3562 	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
3563 		err = f2fs_merge_page_bio(fio);
3564 	else
3565 		err = f2fs_submit_page_bio(fio);
3566 	if (!err) {
3567 		f2fs_update_device_state(fio->sbi, fio->ino,
3568 						fio->new_blkaddr, 1);
3569 		f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3570 	}
3571 
3572 	return err;
3573 drop_bio:
3574 	if (fio->bio && *(fio->bio)) {
3575 		struct bio *bio = *(fio->bio);
3576 
3577 		bio->bi_status = BLK_STS_IOERR;
3578 		bio_endio(bio);
3579 		*(fio->bio) = NULL;
3580 	}
3581 	return err;
3582 }
3583 
3584 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3585 						unsigned int segno)
3586 {
3587 	int i;
3588 
3589 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3590 		if (CURSEG_I(sbi, i)->segno == segno)
3591 			break;
3592 	}
3593 	return i;
3594 }
3595 
3596 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3597 				block_t old_blkaddr, block_t new_blkaddr,
3598 				bool recover_curseg, bool recover_newaddr,
3599 				bool from_gc)
3600 {
3601 	struct sit_info *sit_i = SIT_I(sbi);
3602 	struct curseg_info *curseg;
3603 	unsigned int segno, old_cursegno;
3604 	struct seg_entry *se;
3605 	int type;
3606 	unsigned short old_blkoff;
3607 	unsigned char old_alloc_type;
3608 
3609 	segno = GET_SEGNO(sbi, new_blkaddr);
3610 	se = get_seg_entry(sbi, segno);
3611 	type = se->type;
3612 
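	/*
	 * Temporarily point the matching curseg at the target segment so the
	 * summary for new_blkaddr can be written in place; when
	 * recover_curseg is set, the original position is restored afterwards.
	 */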
3613 	f2fs_down_write(&SM_I(sbi)->curseg_lock);
3614 
3615 	if (!recover_curseg) {
3616 		/* for recovery flow */
3617 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3618 			if (old_blkaddr == NULL_ADDR)
3619 				type = CURSEG_COLD_DATA;
3620 			else
3621 				type = CURSEG_WARM_DATA;
3622 		}
3623 	} else {
3624 		if (IS_CURSEG(sbi, segno)) {
3625 			/* se->type is volatile due to SSR allocation */
3626 			type = __f2fs_get_curseg(sbi, segno);
3627 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3628 		} else {
3629 			type = CURSEG_WARM_DATA;
3630 		}
3631 	}
3632 
3633 	f2fs_bug_on(sbi, !IS_DATASEG(type));
3634 	curseg = CURSEG_I(sbi, type);
3635 
3636 	mutex_lock(&curseg->curseg_mutex);
3637 	down_write(&sit_i->sentry_lock);
3638 
3639 	old_cursegno = curseg->segno;
3640 	old_blkoff = curseg->next_blkoff;
3641 	old_alloc_type = curseg->alloc_type;
3642 
3643 	/* change the current segment */
3644 	if (segno != curseg->segno) {
3645 		curseg->next_segno = segno;
3646 		change_curseg(sbi, type, true);
3647 	}
3648 
3649 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3650 	__add_sum_entry(sbi, type, sum);
3651 
3652 	if (!recover_curseg || recover_newaddr) {
3653 		if (!from_gc)
3654 			update_segment_mtime(sbi, new_blkaddr, 0);
3655 		update_sit_entry(sbi, new_blkaddr, 1);
3656 	}
3657 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3658 		invalidate_mapping_pages(META_MAPPING(sbi),
3659 					old_blkaddr, old_blkaddr);
3660 		f2fs_invalidate_compress_page(sbi, old_blkaddr);
3661 		if (!from_gc)
3662 			update_segment_mtime(sbi, old_blkaddr, 0);
3663 		update_sit_entry(sbi, old_blkaddr, -1);
3664 	}
3665 
3666 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3667 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3668 
3669 	locate_dirty_segment(sbi, old_cursegno);
3670 
3671 	if (recover_curseg) {
3672 		if (old_cursegno != curseg->segno) {
3673 			curseg->next_segno = old_cursegno;
3674 			change_curseg(sbi, type, true);
3675 		}
3676 		curseg->next_blkoff = old_blkoff;
3677 		curseg->alloc_type = old_alloc_type;
3678 	}
3679 
3680 	up_write(&sit_i->sentry_lock);
3681 	mutex_unlock(&curseg->curseg_mutex);
3682 	f2fs_up_write(&SM_I(sbi)->curseg_lock);
3683 }
3684 
3685 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3686 				block_t old_addr, block_t new_addr,
3687 				unsigned char version, bool recover_curseg,
3688 				bool recover_newaddr)
3689 {
3690 	struct f2fs_summary sum;
3691 
3692 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3693 
3694 	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3695 					recover_curseg, recover_newaddr, false);
3696 
3697 	f2fs_update_data_blkaddr(dn, new_addr);
3698 }
3699 
3700 void f2fs_wait_on_page_writeback(struct page *page,
3701 				enum page_type type, bool ordered, bool locked)
3702 {
3703 	if (PageWriteback(page)) {
3704 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3705 
3706 		/* submit cached LFS IO */
3707 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3708 		/* submit cached IPU IO */
3709 		f2fs_submit_merged_ipu_write(sbi, NULL, page);
3710 		if (ordered) {
3711 			wait_on_page_writeback(page);
3712 			f2fs_bug_on(sbi, locked && PageWriteback(page));
3713 		} else {
3714 			wait_for_stable_page(page);
3715 		}
3716 	}
3717 }
3718 
3719 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3720 {
3721 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3722 	struct page *cpage;
3723 
3724 	if (!f2fs_post_read_required(inode))
3725 		return;
3726 
3727 	if (!__is_valid_data_blkaddr(blkaddr))
3728 		return;
3729 
3730 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3731 	if (cpage) {
3732 		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3733 		f2fs_put_page(cpage, 1);
3734 	}
3735 }
3736 
3737 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3738 								block_t len)
3739 {
3740 	block_t i;
3741 
3742 	for (i = 0; i < len; i++)
3743 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
3744 }
3745 
3746 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3747 {
3748 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3749 	struct curseg_info *seg_i;
3750 	unsigned char *kaddr;
3751 	struct page *page;
3752 	block_t start;
3753 	int i, j, offset;
3754 
3755 	start = start_sum_block(sbi);
3756 
3757 	page = f2fs_get_meta_page(sbi, start++);
3758 	if (IS_ERR(page))
3759 		return PTR_ERR(page);
3760 	kaddr = (unsigned char *)page_address(page);
3761 
3762 	/* Step 1: restore nat cache */
3763 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3764 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3765 
3766 	/* Step 2: restore sit cache */
3767 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3768 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3769 	offset = 2 * SUM_JOURNAL_SIZE;
3770 
3771 	/* Step 3: restore summary entries */
3772 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3773 		unsigned short blk_off;
3774 		unsigned int segno;
3775 
3776 		seg_i = CURSEG_I(sbi, i);
3777 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3778 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3779 		seg_i->next_segno = segno;
3780 		reset_curseg(sbi, i, 0);
3781 		seg_i->alloc_type = ckpt->alloc_type[i];
3782 		seg_i->next_blkoff = blk_off;
3783 
3784 		if (seg_i->alloc_type == SSR)
3785 			blk_off = sbi->blocks_per_seg;
3786 
3787 		for (j = 0; j < blk_off; j++) {
3788 			struct f2fs_summary *s;
3789 
3790 			s = (struct f2fs_summary *)(kaddr + offset);
3791 			seg_i->sum_blk->entries[j] = *s;
3792 			offset += SUMMARY_SIZE;
3793 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3794 						SUM_FOOTER_SIZE)
3795 				continue;
3796 
3797 			f2fs_put_page(page, 1);
3798 			page = NULL;
3799 
3800 			page = f2fs_get_meta_page(sbi, start++);
3801 			if (IS_ERR(page))
3802 				return PTR_ERR(page);
3803 			kaddr = (unsigned char *)page_address(page);
3804 			offset = 0;
3805 		}
3806 	}
3807 	f2fs_put_page(page, 1);
3808 	return 0;
3809 }
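
/*
 * On-disk layout consumed by read_compacted_summaries() above: the first
 * compacted summary block holds the NAT journal and then the SIT journal
 * (SUM_JOURNAL_SIZE bytes each), followed by the summary entries of the
 * three data logs packed back to back. Entries spill over into subsequent
 * blocks, and the trailing SUM_FOOTER_SIZE bytes of each block stay
 * unused. For an SSR log all blocks_per_seg entries are restored, since
 * valid blocks may sit at any offset of the segment.
 */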
3810 
3811 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3812 {
3813 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3814 	struct f2fs_summary_block *sum;
3815 	struct curseg_info *curseg;
3816 	struct page *new;
3817 	unsigned short blk_off;
3818 	unsigned int segno = 0;
3819 	block_t blk_addr = 0;
3820 	int err = 0;
3821 
3822 	/* get segment number and block addr */
3823 	if (IS_DATASEG(type)) {
3824 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3825 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3826 							CURSEG_HOT_DATA]);
3827 		if (__exist_node_summaries(sbi))
3828 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3829 		else
3830 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3831 	} else {
3832 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
3833 							CURSEG_HOT_NODE]);
3834 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3835 							CURSEG_HOT_NODE]);
3836 		if (__exist_node_summaries(sbi))
3837 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3838 							type - CURSEG_HOT_NODE);
3839 		else
3840 			blk_addr = GET_SUM_BLOCK(sbi, segno);
3841 	}
3842 
3843 	new = f2fs_get_meta_page(sbi, blk_addr);
3844 	if (IS_ERR(new))
3845 		return PTR_ERR(new);
3846 	sum = (struct f2fs_summary_block *)page_address(new);
3847 
3848 	if (IS_NODESEG(type)) {
3849 		if (__exist_node_summaries(sbi)) {
3850 			struct f2fs_summary *ns = &sum->entries[0];
3851 			int i;
3852 
3853 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3854 				ns->version = 0;
3855 				ns->ofs_in_node = 0;
3856 			}
3857 		} else {
3858 			err = f2fs_restore_node_summary(sbi, segno, sum);
3859 			if (err)
3860 				goto out;
3861 		}
3862 	}
3863 
	/* set the uncompleted segment as curseg */
3865 	curseg = CURSEG_I(sbi, type);
3866 	mutex_lock(&curseg->curseg_mutex);
3867 
3868 	/* update journal info */
3869 	down_write(&curseg->journal_rwsem);
3870 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3871 	up_write(&curseg->journal_rwsem);
3872 
3873 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3874 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3875 	curseg->next_segno = segno;
3876 	reset_curseg(sbi, type, 0);
3877 	curseg->alloc_type = ckpt->alloc_type[type];
3878 	curseg->next_blkoff = blk_off;
3879 	mutex_unlock(&curseg->curseg_mutex);
3880 out:
3881 	f2fs_put_page(new, 1);
3882 	return err;
3883 }
3884 
3885 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3886 {
3887 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3888 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3889 	int type = CURSEG_HOT_DATA;
3890 	int err;
3891 
3892 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3893 		int npages = f2fs_npages_for_summary_flush(sbi, true);
3894 
3895 		if (npages >= 2)
3896 			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
3897 							META_CP, true);
3898 
		/* restore compacted data summaries */
3900 		err = read_compacted_summaries(sbi);
3901 		if (err)
3902 			return err;
3903 		type = CURSEG_HOT_NODE;
3904 	}
3905 
3906 	if (__exist_node_summaries(sbi))
3907 		f2fs_ra_meta_pages(sbi,
3908 				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
3909 				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
3910 
3911 	for (; type <= CURSEG_COLD_NODE; type++) {
3912 		err = read_normal_summaries(sbi, type);
3913 		if (err)
3914 			return err;
3915 	}
3916 
3917 	/* sanity check for summary blocks */
3918 	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
3919 			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
3920 		f2fs_err(sbi, "invalid journal entries nats %u sits %u",
3921 			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
3922 		return -EINVAL;
3923 	}
3924 
3925 	return 0;
3926 }
3927 
3928 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
3929 {
3930 	struct page *page;
3931 	unsigned char *kaddr;
3932 	struct f2fs_summary *summary;
3933 	struct curseg_info *seg_i;
3934 	int written_size = 0;
3935 	int i, j;
3936 
3937 	page = f2fs_grab_meta_page(sbi, blkaddr++);
3938 	kaddr = (unsigned char *)page_address(page);
3939 	memset(kaddr, 0, PAGE_SIZE);
3940 
3941 	/* Step 1: write nat cache */
3942 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3943 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
3944 	written_size += SUM_JOURNAL_SIZE;
3945 
3946 	/* Step 2: write sit cache */
3947 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3948 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
3949 	written_size += SUM_JOURNAL_SIZE;
3950 
3951 	/* Step 3: write summary entries */
3952 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3953 		unsigned short blkoff;
3954 
3955 		seg_i = CURSEG_I(sbi, i);
3956 		if (sbi->ckpt->alloc_type[i] == SSR)
3957 			blkoff = sbi->blocks_per_seg;
3958 		else
3959 			blkoff = curseg_blkoff(sbi, i);
3960 
3961 		for (j = 0; j < blkoff; j++) {
3962 			if (!page) {
3963 				page = f2fs_grab_meta_page(sbi, blkaddr++);
3964 				kaddr = (unsigned char *)page_address(page);
3965 				memset(kaddr, 0, PAGE_SIZE);
3966 				written_size = 0;
3967 			}
3968 			summary = (struct f2fs_summary *)(kaddr + written_size);
3969 			*summary = seg_i->sum_blk->entries[j];
3970 			written_size += SUMMARY_SIZE;
3971 
3972 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
3973 							SUM_FOOTER_SIZE)
3974 				continue;
3975 
3976 			set_page_dirty(page);
3977 			f2fs_put_page(page, 1);
3978 			page = NULL;
3979 		}
3980 	}
3981 	if (page) {
3982 		set_page_dirty(page);
3983 		f2fs_put_page(page, 1);
3984 	}
3985 }
3986 
3987 static void write_normal_summaries(struct f2fs_sb_info *sbi,
3988 					block_t blkaddr, int type)
3989 {
3990 	int i, end;
3991 
3992 	if (IS_DATASEG(type))
3993 		end = type + NR_CURSEG_DATA_TYPE;
3994 	else
3995 		end = type + NR_CURSEG_NODE_TYPE;
3996 
3997 	for (i = type; i < end; i++)
3998 		write_current_sum_page(sbi, i, blkaddr + (i - type));
3999 }
4000 
4001 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4002 {
4003 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4004 		write_compacted_summaries(sbi, start_blk);
4005 	else
4006 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4007 }
4008 
4009 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4010 {
4011 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4012 }
4013 
4014 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
4015 					unsigned int val, int alloc)
4016 {
4017 	int i;
4018 
4019 	if (type == NAT_JOURNAL) {
4020 		for (i = 0; i < nats_in_cursum(journal); i++) {
4021 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4022 				return i;
4023 		}
4024 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
4025 			return update_nats_in_cursum(journal, 1);
4026 	} else if (type == SIT_JOURNAL) {
4027 		for (i = 0; i < sits_in_cursum(journal); i++)
4028 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4029 				return i;
4030 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
4031 			return update_sits_in_cursum(journal, 1);
4032 	}
4033 	return -1;
4034 }
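
/*
 * f2fs_lookup_journal_in_cursum() implements a lookup-or-allocate
 * pattern: a hit returns the existing slot, a miss with @alloc set
 * appends a fresh slot (if journal space remains) and returns its index,
 * and -1 tells the caller to fall back to the on-disk NAT/SIT block.
 * The SIT flush below uses exactly this shape:
 *
 *	offset = f2fs_lookup_journal_in_cursum(journal, SIT_JOURNAL,
 *						segno, 1);
 *	f2fs_bug_on(sbi, offset < 0);
 *	segno_in_journal(journal, offset) = cpu_to_le32(segno);
 */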
4035 
4036 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4037 					unsigned int segno)
4038 {
4039 	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4040 }
4041 
4042 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4043 					unsigned int start)
4044 {
4045 	struct sit_info *sit_i = SIT_I(sbi);
4046 	struct page *page;
4047 	pgoff_t src_off, dst_off;
4048 
4049 	src_off = current_sit_addr(sbi, start);
4050 	dst_off = next_sit_addr(sbi, src_off);
4051 
4052 	page = f2fs_grab_meta_page(sbi, dst_off);
4053 	seg_info_to_sit_page(sbi, page, start);
4054 
4055 	set_page_dirty(page);
4056 	set_to_next_sit(sit_i, start);
4057 
4058 	return page;
4059 }
4060 
4061 static struct sit_entry_set *grab_sit_entry_set(void)
4062 {
4063 	struct sit_entry_set *ses =
4064 			f2fs_kmem_cache_alloc(sit_entry_set_slab,
4065 						GFP_NOFS, true, NULL);
4066 
4067 	ses->entry_cnt = 0;
4068 	INIT_LIST_HEAD(&ses->set_list);
4069 	return ses;
4070 }
4071 
4072 static void release_sit_entry_set(struct sit_entry_set *ses)
4073 {
4074 	list_del(&ses->set_list);
4075 	kmem_cache_free(sit_entry_set_slab, ses);
4076 }
4077 
4078 static void adjust_sit_entry_set(struct sit_entry_set *ses,
4079 						struct list_head *head)
4080 {
4081 	struct sit_entry_set *next = ses;
4082 
4083 	if (list_is_last(&ses->set_list, head))
4084 		return;
4085 
4086 	list_for_each_entry_continue(next, head, set_list)
4087 		if (ses->entry_cnt <= next->entry_cnt)
4088 			break;
4089 
4090 	list_move_tail(&ses->set_list, &next->set_list);
4091 }
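
/*
 * The set list is kept sorted by entry_cnt in ascending order:
 * adjust_sit_entry_set() re-inserts a grown set before the first set
 * with an equal or larger count. Since the flush path walks the list
 * from the head, the smallest sets are considered for the journal
 * first, letting as many sets as possible avoid a full SIT page write.
 */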
4092 
4093 static void add_sit_entry(unsigned int segno, struct list_head *head)
4094 {
4095 	struct sit_entry_set *ses;
4096 	unsigned int start_segno = START_SEGNO(segno);
4097 
4098 	list_for_each_entry(ses, head, set_list) {
4099 		if (ses->start_segno == start_segno) {
4100 			ses->entry_cnt++;
4101 			adjust_sit_entry_set(ses, head);
4102 			return;
4103 		}
4104 	}
4105 
4106 	ses = grab_sit_entry_set();
4107 
4108 	ses->start_segno = start_segno;
4109 	ses->entry_cnt++;
4110 	list_add(&ses->set_list, head);
4111 }
4112 
4113 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4114 {
4115 	struct f2fs_sm_info *sm_info = SM_I(sbi);
4116 	struct list_head *set_list = &sm_info->sit_entry_set;
4117 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4118 	unsigned int segno;
4119 
4120 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4121 		add_sit_entry(segno, set_list);
4122 }
4123 
4124 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4125 {
4126 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4127 	struct f2fs_journal *journal = curseg->journal;
4128 	int i;
4129 
4130 	down_write(&curseg->journal_rwsem);
4131 	for (i = 0; i < sits_in_cursum(journal); i++) {
4132 		unsigned int segno;
4133 		bool dirtied;
4134 
4135 		segno = le32_to_cpu(segno_in_journal(journal, i));
4136 		dirtied = __mark_sit_entry_dirty(sbi, segno);
4137 
4138 		if (!dirtied)
4139 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4140 	}
4141 	update_sits_in_cursum(journal, -i);
4142 	up_write(&curseg->journal_rwsem);
4143 }
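
/*
 * Note that update_sits_in_cursum(journal, -i) above subtracts exactly
 * the number of entries drained, leaving the journal empty. Nothing is
 * lost: segments that were already dirty have been accounted by
 * add_sits_in_set() in the flush path below, and the remainder are
 * added to the set list here.
 */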
4144 
4145 /*
4146  * CP calls this function, which flushes SIT entries including sit_journal,
4147  * and moves prefree segs to free segs.
4148  */
4149 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4150 {
4151 	struct sit_info *sit_i = SIT_I(sbi);
4152 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4153 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4154 	struct f2fs_journal *journal = curseg->journal;
4155 	struct sit_entry_set *ses, *tmp;
4156 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
4157 	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4158 	struct seg_entry *se;
4159 
4160 	down_write(&sit_i->sentry_lock);
4161 
4162 	if (!sit_i->dirty_sentries)
4163 		goto out;
4164 
	/*
	 * temporarily add and account the sit entries of the dirty
	 * bitmap in the sit entry set
	 */
4169 	add_sits_in_set(sbi);
4170 
	/*
	 * if there is not enough space in the journal to store all dirty
	 * sit entries, remove all entries from the journal and add and
	 * account them in the sit entry set.
	 */
4176 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4177 								!to_journal)
4178 		remove_sits_in_journal(sbi);
4179 
4180 	/*
4181 	 * there are two steps to flush sit entries:
4182 	 * #1, flush sit entries to journal in current cold data summary block.
4183 	 * #2, flush sit entries to sit page.
4184 	 */
4185 	list_for_each_entry_safe(ses, tmp, head, set_list) {
4186 		struct page *page = NULL;
4187 		struct f2fs_sit_block *raw_sit = NULL;
4188 		unsigned int start_segno = ses->start_segno;
4189 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4190 						(unsigned long)MAIN_SEGS(sbi));
4191 		unsigned int segno = start_segno;
4192 
4193 		if (to_journal &&
4194 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4195 			to_journal = false;
4196 
4197 		if (to_journal) {
4198 			down_write(&curseg->journal_rwsem);
4199 		} else {
4200 			page = get_next_sit_page(sbi, start_segno);
4201 			raw_sit = page_address(page);
4202 		}
4203 
4204 		/* flush dirty sit entries in region of current sit set */
4205 		for_each_set_bit_from(segno, bitmap, end) {
4206 			int offset, sit_offset;
4207 
4208 			se = get_seg_entry(sbi, segno);
4209 #ifdef CONFIG_F2FS_CHECK_FS
4210 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4211 						SIT_VBLOCK_MAP_SIZE))
4212 				f2fs_bug_on(sbi, 1);
4213 #endif
4214 
4215 			/* add discard candidates */
4216 			if (!(cpc->reason & CP_DISCARD)) {
4217 				cpc->trim_start = segno;
4218 				add_discard_addrs(sbi, cpc, false);
4219 			}
4220 
4221 			if (to_journal) {
4222 				offset = f2fs_lookup_journal_in_cursum(journal,
4223 							SIT_JOURNAL, segno, 1);
4224 				f2fs_bug_on(sbi, offset < 0);
4225 				segno_in_journal(journal, offset) =
4226 							cpu_to_le32(segno);
4227 				seg_info_to_raw_sit(se,
4228 					&sit_in_journal(journal, offset));
4229 				check_block_count(sbi, segno,
4230 					&sit_in_journal(journal, offset));
4231 			} else {
4232 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4233 				seg_info_to_raw_sit(se,
4234 						&raw_sit->entries[sit_offset]);
4235 				check_block_count(sbi, segno,
4236 						&raw_sit->entries[sit_offset]);
4237 			}
4238 
4239 			__clear_bit(segno, bitmap);
4240 			sit_i->dirty_sentries--;
4241 			ses->entry_cnt--;
4242 		}
4243 
4244 		if (to_journal)
4245 			up_write(&curseg->journal_rwsem);
4246 		else
4247 			f2fs_put_page(page, 1);
4248 
4249 		f2fs_bug_on(sbi, ses->entry_cnt);
4250 		release_sit_entry_set(ses);
4251 	}
4252 
4253 	f2fs_bug_on(sbi, !list_empty(head));
4254 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
4255 out:
4256 	if (cpc->reason & CP_DISCARD) {
4257 		__u64 trim_start = cpc->trim_start;
4258 
4259 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4260 			add_discard_addrs(sbi, cpc, false);
4261 
4262 		cpc->trim_start = trim_start;
4263 	}
4264 	up_write(&sit_i->sentry_lock);
4265 
4266 	set_prefree_as_free_segments(sbi);
4267 }
4268 
4269 static int build_sit_info(struct f2fs_sb_info *sbi)
4270 {
4271 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4272 	struct sit_info *sit_i;
4273 	unsigned int sit_segs, start;
4274 	char *src_bitmap, *bitmap;
4275 	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4276 	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4277 
4278 	/* allocate memory for SIT information */
4279 	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4280 	if (!sit_i)
4281 		return -ENOMEM;
4282 
4283 	SM_I(sbi)->sit_info = sit_i;
4284 
4285 	sit_i->sentries =
4286 		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4287 					      MAIN_SEGS(sbi)),
4288 			      GFP_KERNEL);
4289 	if (!sit_i->sentries)
4290 		return -ENOMEM;
4291 
4292 	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4293 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4294 								GFP_KERNEL);
4295 	if (!sit_i->dirty_sentries_bitmap)
4296 		return -ENOMEM;
4297 
4298 #ifdef CONFIG_F2FS_CHECK_FS
4299 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4300 #else
4301 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4302 #endif
4303 	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4304 	if (!sit_i->bitmap)
4305 		return -ENOMEM;
4306 
4307 	bitmap = sit_i->bitmap;
4308 
4309 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4310 		sit_i->sentries[start].cur_valid_map = bitmap;
4311 		bitmap += SIT_VBLOCK_MAP_SIZE;
4312 
4313 		sit_i->sentries[start].ckpt_valid_map = bitmap;
4314 		bitmap += SIT_VBLOCK_MAP_SIZE;
4315 
4316 #ifdef CONFIG_F2FS_CHECK_FS
4317 		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4318 		bitmap += SIT_VBLOCK_MAP_SIZE;
4319 #endif
4320 
4321 		if (discard_map) {
4322 			sit_i->sentries[start].discard_map = bitmap;
4323 			bitmap += SIT_VBLOCK_MAP_SIZE;
4324 		}
4325 	}
4326 
4327 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4328 	if (!sit_i->tmp_map)
4329 		return -ENOMEM;
4330 
4331 	if (__is_large_section(sbi)) {
4332 		sit_i->sec_entries =
4333 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4334 						      MAIN_SECS(sbi)),
4335 				      GFP_KERNEL);
4336 		if (!sit_i->sec_entries)
4337 			return -ENOMEM;
4338 	}
4339 
	/* get information related to SIT */
4341 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4342 
	/* setup SIT bitmap from checkpoint pack */
4344 	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4345 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4346 
4347 	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4348 	if (!sit_i->sit_bitmap)
4349 		return -ENOMEM;
4350 
4351 #ifdef CONFIG_F2FS_CHECK_FS
4352 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4353 					sit_bitmap_size, GFP_KERNEL);
4354 	if (!sit_i->sit_bitmap_mir)
4355 		return -ENOMEM;
4356 
4357 	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4358 					main_bitmap_size, GFP_KERNEL);
4359 	if (!sit_i->invalid_segmap)
4360 		return -ENOMEM;
4361 #endif
4362 
4363 	/* init SIT information */
4364 	sit_i->s_ops = &default_salloc_ops;
4365 
4366 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4367 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4368 	sit_i->written_valid_blocks = 0;
4369 	sit_i->bitmap_size = sit_bitmap_size;
4370 	sit_i->dirty_sentries = 0;
4371 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4372 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4373 	sit_i->mounted_time = ktime_get_boottime_seconds();
4374 	init_rwsem(&sit_i->sentry_lock);
4375 	return 0;
4376 }
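
/*
 * The per-segment bitmaps above are carved out of one flat allocation.
 * A worked example, assuming the common 512 blocks per segment (so
 * SIT_VBLOCK_MAP_SIZE is 64 bytes), CONFIG_F2FS_CHECK_FS off and
 * block-unit discard on: each segment consumes 64 bytes each for
 * cur_valid_map, ckpt_valid_map and discard_map, i.e. bitmap_size is
 * MAIN_SEGS(sbi) * 64 * 3, sliced sequentially by the loop above.
 */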
4377 
4378 static int build_free_segmap(struct f2fs_sb_info *sbi)
4379 {
4380 	struct free_segmap_info *free_i;
4381 	unsigned int bitmap_size, sec_bitmap_size;
4382 
4383 	/* allocate memory for free segmap information */
4384 	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4385 	if (!free_i)
4386 		return -ENOMEM;
4387 
4388 	SM_I(sbi)->free_info = free_i;
4389 
4390 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4391 	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4392 	if (!free_i->free_segmap)
4393 		return -ENOMEM;
4394 
4395 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4396 	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4397 	if (!free_i->free_secmap)
4398 		return -ENOMEM;
4399 
4400 	/* set all segments as dirty temporarily */
4401 	memset(free_i->free_segmap, 0xff, bitmap_size);
4402 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4403 
4404 	/* init free segmap information */
4405 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4406 	free_i->free_segments = 0;
4407 	free_i->free_sections = 0;
4408 	spin_lock_init(&free_i->segmap_lock);
4409 	return 0;
4410 }
4411 
4412 static int build_curseg(struct f2fs_sb_info *sbi)
4413 {
4414 	struct curseg_info *array;
4415 	int i;
4416 
4417 	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4418 					sizeof(*array)), GFP_KERNEL);
4419 	if (!array)
4420 		return -ENOMEM;
4421 
4422 	SM_I(sbi)->curseg_array = array;
4423 
4424 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4425 		mutex_init(&array[i].curseg_mutex);
4426 		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4427 		if (!array[i].sum_blk)
4428 			return -ENOMEM;
4429 		init_rwsem(&array[i].journal_rwsem);
4430 		array[i].journal = f2fs_kzalloc(sbi,
4431 				sizeof(struct f2fs_journal), GFP_KERNEL);
4432 		if (!array[i].journal)
4433 			return -ENOMEM;
4434 		if (i < NR_PERSISTENT_LOG)
4435 			array[i].seg_type = CURSEG_HOT_DATA + i;
4436 		else if (i == CURSEG_COLD_DATA_PINNED)
4437 			array[i].seg_type = CURSEG_COLD_DATA;
4438 		else if (i == CURSEG_ALL_DATA_ATGC)
4439 			array[i].seg_type = CURSEG_COLD_DATA;
4440 		array[i].segno = NULL_SEGNO;
4441 		array[i].next_blkoff = 0;
4442 		array[i].inited = false;
4443 	}
4444 	return restore_curseg_summaries(sbi);
4445 }
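
/*
 * Note on the seg_type assignments in build_curseg() above: the pinned
 * and ATGC logs (i >= NR_PERSISTENT_LOG) are in-memory only and both
 * alias CURSEG_COLD_DATA, so blocks they allocate are accounted, and
 * later recovered, as ordinary cold data.
 */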
4446 
4447 static int build_sit_entries(struct f2fs_sb_info *sbi)
4448 {
4449 	struct sit_info *sit_i = SIT_I(sbi);
4450 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4451 	struct f2fs_journal *journal = curseg->journal;
4452 	struct seg_entry *se;
4453 	struct f2fs_sit_entry sit;
4454 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4455 	unsigned int i, start, end;
4456 	unsigned int readed, start_blk = 0;
4457 	int err = 0;
4458 	block_t total_node_blocks = 0;
4459 
4460 	do {
4461 		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4462 							META_SIT, true);
4463 
4464 		start = start_blk * sit_i->sents_per_block;
4465 		end = (start_blk + readed) * sit_i->sents_per_block;
4466 
4467 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
4468 			struct f2fs_sit_block *sit_blk;
4469 			struct page *page;
4470 
4471 			se = &sit_i->sentries[start];
4472 			page = get_current_sit_page(sbi, start);
4473 			if (IS_ERR(page))
4474 				return PTR_ERR(page);
4475 			sit_blk = (struct f2fs_sit_block *)page_address(page);
4476 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4477 			f2fs_put_page(page, 1);
4478 
4479 			err = check_block_count(sbi, start, &sit);
4480 			if (err)
4481 				return err;
4482 			seg_info_from_raw_sit(se, &sit);
4483 			if (IS_NODESEG(se->type))
4484 				total_node_blocks += se->valid_blocks;
4485 
4486 			if (f2fs_block_unit_discard(sbi)) {
4487 				/* build discard map only one time */
4488 				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4489 					memset(se->discard_map, 0xff,
4490 						SIT_VBLOCK_MAP_SIZE);
4491 				} else {
4492 					memcpy(se->discard_map,
4493 						se->cur_valid_map,
4494 						SIT_VBLOCK_MAP_SIZE);
4495 					sbi->discard_blks +=
4496 						sbi->blocks_per_seg -
4497 						se->valid_blocks;
4498 				}
4499 			}
4500 
4501 			if (__is_large_section(sbi))
4502 				get_sec_entry(sbi, start)->valid_blocks +=
4503 							se->valid_blocks;
4504 		}
4505 		start_blk += readed;
4506 	} while (start_blk < sit_blk_cnt);
4507 
4508 	down_read(&curseg->journal_rwsem);
4509 	for (i = 0; i < sits_in_cursum(journal); i++) {
4510 		unsigned int old_valid_blocks;
4511 
4512 		start = le32_to_cpu(segno_in_journal(journal, i));
4513 		if (start >= MAIN_SEGS(sbi)) {
4514 			f2fs_err(sbi, "Wrong journal entry on segno %u",
4515 				 start);
4516 			err = -EFSCORRUPTED;
4517 			break;
4518 		}
4519 
4520 		se = &sit_i->sentries[start];
4521 		sit = sit_in_journal(journal, i);
4522 
4523 		old_valid_blocks = se->valid_blocks;
4524 		if (IS_NODESEG(se->type))
4525 			total_node_blocks -= old_valid_blocks;
4526 
4527 		err = check_block_count(sbi, start, &sit);
4528 		if (err)
4529 			break;
4530 		seg_info_from_raw_sit(se, &sit);
4531 		if (IS_NODESEG(se->type))
4532 			total_node_blocks += se->valid_blocks;
4533 
4534 		if (f2fs_block_unit_discard(sbi)) {
4535 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4536 				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4537 			} else {
4538 				memcpy(se->discard_map, se->cur_valid_map,
4539 							SIT_VBLOCK_MAP_SIZE);
4540 				sbi->discard_blks += old_valid_blocks;
4541 				sbi->discard_blks -= se->valid_blocks;
4542 			}
4543 		}
4544 
4545 		if (__is_large_section(sbi)) {
4546 			get_sec_entry(sbi, start)->valid_blocks +=
4547 							se->valid_blocks;
4548 			get_sec_entry(sbi, start)->valid_blocks -=
4549 							old_valid_blocks;
4550 		}
4551 	}
4552 	up_read(&curseg->journal_rwsem);
4553 
4554 	if (!err && total_node_blocks != valid_node_count(sbi)) {
4555 		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4556 			 total_node_blocks, valid_node_count(sbi));
4557 		err = -EFSCORRUPTED;
4558 	}
4559 
4560 	return err;
4561 }
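
/*
 * build_sit_entries() above loads each entry from two sources in order:
 * the on-disk SIT blocks first, then the SIT journal in the cold data
 * summary, whose entries are newer and override the block copies (hence
 * the subtract-then-re-add of old_valid_blocks). The running
 * total_node_blocks tally is finally cross-checked against the NAT-side
 * valid_node_count() to catch a corrupted SIT early.
 */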
4562 
4563 static void init_free_segmap(struct f2fs_sb_info *sbi)
4564 {
4565 	unsigned int start;
4566 	int type;
4567 	struct seg_entry *sentry;
4568 
4569 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4570 		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4571 			continue;
4572 		sentry = get_seg_entry(sbi, start);
4573 		if (!sentry->valid_blocks)
4574 			__set_free(sbi, start);
4575 		else
4576 			SIT_I(sbi)->written_valid_blocks +=
4577 						sentry->valid_blocks;
4578 	}
4579 
	/* mark the current segments as in-use */
4581 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4582 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4583 
4584 		__set_test_and_inuse(sbi, curseg_t->segno);
4585 	}
4586 }
4587 
4588 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4589 {
4590 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4591 	struct free_segmap_info *free_i = FREE_I(sbi);
4592 	unsigned int segno = 0, offset = 0, secno;
4593 	block_t valid_blocks, usable_blks_in_seg;
4594 	block_t blks_per_sec = BLKS_PER_SEC(sbi);
4595 
4596 	while (1) {
4597 		/* find dirty segment based on free segmap */
4598 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4599 		if (segno >= MAIN_SEGS(sbi))
4600 			break;
4601 		offset = segno + 1;
4602 		valid_blocks = get_valid_blocks(sbi, segno, false);
4603 		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4604 		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4605 			continue;
4606 		if (valid_blocks > usable_blks_in_seg) {
4607 			f2fs_bug_on(sbi, 1);
4608 			continue;
4609 		}
4610 		mutex_lock(&dirty_i->seglist_lock);
4611 		__locate_dirty_segment(sbi, segno, DIRTY);
4612 		mutex_unlock(&dirty_i->seglist_lock);
4613 	}
4614 
4615 	if (!__is_large_section(sbi))
4616 		return;
4617 
4618 	mutex_lock(&dirty_i->seglist_lock);
4619 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4620 		valid_blocks = get_valid_blocks(sbi, segno, true);
4621 		secno = GET_SEC_FROM_SEG(sbi, segno);
4622 
4623 		if (!valid_blocks || valid_blocks == blks_per_sec)
4624 			continue;
4625 		if (IS_CURSEC(sbi, secno))
4626 			continue;
4627 		set_bit(secno, dirty_i->dirty_secmap);
4628 	}
4629 	mutex_unlock(&dirty_i->seglist_lock);
4630 }
4631 
4632 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4633 {
4634 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4635 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4636 
4637 	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4638 	if (!dirty_i->victim_secmap)
4639 		return -ENOMEM;
4640 	return 0;
4641 }
4642 
4643 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4644 {
4645 	struct dirty_seglist_info *dirty_i;
4646 	unsigned int bitmap_size, i;
4647 
4648 	/* allocate memory for dirty segments list information */
4649 	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4650 								GFP_KERNEL);
4651 	if (!dirty_i)
4652 		return -ENOMEM;
4653 
4654 	SM_I(sbi)->dirty_info = dirty_i;
4655 	mutex_init(&dirty_i->seglist_lock);
4656 
4657 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4658 
4659 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
4660 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4661 								GFP_KERNEL);
4662 		if (!dirty_i->dirty_segmap[i])
4663 			return -ENOMEM;
4664 	}
4665 
4666 	if (__is_large_section(sbi)) {
4667 		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4668 		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4669 						bitmap_size, GFP_KERNEL);
4670 		if (!dirty_i->dirty_secmap)
4671 			return -ENOMEM;
4672 	}
4673 
4674 	init_dirty_segmap(sbi);
4675 	return init_victim_secmap(sbi);
4676 }
4677 
4678 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4679 {
4680 	int i;
4681 
	/*
	 * In an LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
	 * in an LFS curseg, all blkaddrs after .next_blkoff should be unused.
	 */
4686 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4687 		struct curseg_info *curseg = CURSEG_I(sbi, i);
4688 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4689 		unsigned int blkofs = curseg->next_blkoff;
4690 
4691 		if (f2fs_sb_has_readonly(sbi) &&
4692 			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
4693 			continue;
4694 
4695 		sanity_check_seg_type(sbi, curseg->seg_type);
4696 
4697 		if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
4698 			f2fs_err(sbi,
4699 				 "Current segment has invalid alloc_type:%d",
4700 				 curseg->alloc_type);
4701 			return -EFSCORRUPTED;
4702 		}
4703 
4704 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
4705 			goto out;
4706 
4707 		if (curseg->alloc_type == SSR)
4708 			continue;
4709 
4710 		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4711 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4712 				continue;
4713 out:
4714 			f2fs_err(sbi,
4715 				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4716 				 i, curseg->segno, curseg->alloc_type,
4717 				 curseg->next_blkoff, blkofs);
4718 			return -EFSCORRUPTED;
4719 		}
4720 	}
4721 	return 0;
4722 }
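
/*
 * A concrete reading of the check above: for an LFS log with
 * next_blkoff == 8, bits 8..blocks_per_seg-1 of cur_valid_map must all
 * be clear, because LFS only ever appends. For an SSR log only bit 8
 * itself must be clear, since earlier and later offsets may legally
 * hold valid blocks left over from the segment's previous use.
 */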
4723 
4724 #ifdef CONFIG_BLK_DEV_ZONED
4725 
4726 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4727 				    struct f2fs_dev_info *fdev,
4728 				    struct blk_zone *zone)
4729 {
4730 	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4731 	block_t zone_block, wp_block, last_valid_block;
4732 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4733 	int i, s, b, ret;
4734 	struct seg_entry *se;
4735 
4736 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4737 		return 0;
4738 
4739 	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4740 	wp_segno = GET_SEGNO(sbi, wp_block);
4741 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4742 	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4743 	zone_segno = GET_SEGNO(sbi, zone_block);
4744 	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4745 
4746 	if (zone_segno >= MAIN_SEGS(sbi))
4747 		return 0;
4748 
	/*
	 * Skip checking the zones that cursegs point to, since
	 * fix_curseg_write_pointer() checks them.
	 */
4753 	for (i = 0; i < NO_CHECK_TYPE; i++)
4754 		if (zone_secno == GET_SEC_FROM_SEG(sbi,
4755 						   CURSEG_I(sbi, i)->segno))
4756 			return 0;
4757 
4758 	/*
4759 	 * Get last valid block of the zone.
4760 	 */
4761 	last_valid_block = zone_block - 1;
4762 	for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4763 		segno = zone_segno + s;
4764 		se = get_seg_entry(sbi, segno);
4765 		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4766 			if (f2fs_test_bit(b, se->cur_valid_map)) {
4767 				last_valid_block = START_BLOCK(sbi, segno) + b;
4768 				break;
4769 			}
4770 		if (last_valid_block >= zone_block)
4771 			break;
4772 	}
4773 
	/*
	 * If the last valid block is beyond the write pointer, report the
	 * inconsistency. This inconsistency does not cause a write error
	 * because the zone will not be selected for write operations until
	 * it gets discarded. Just report it.
	 */
4780 	if (last_valid_block >= wp_block) {
4781 		f2fs_notice(sbi, "Valid block beyond write pointer: "
4782 			    "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
4783 			    GET_SEGNO(sbi, last_valid_block),
4784 			    GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4785 			    wp_segno, wp_blkoff);
4786 		return 0;
4787 	}
4788 
	/*
	 * If there is no valid block in the zone and the write pointer is
	 * not at the zone start, reset the write pointer.
	 */
4793 	if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
4794 		f2fs_notice(sbi,
4795 			    "Zone without valid block has non-zero write "
4796 			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4797 			    wp_segno, wp_blkoff);
4798 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4799 					zone->len >> log_sectors_per_block);
4800 		if (ret) {
4801 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4802 				 fdev->path, ret);
4803 			return ret;
4804 		}
4805 	}
4806 
4807 	return 0;
4808 }
4809 
4810 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4811 						  block_t zone_blkaddr)
4812 {
4813 	int i;
4814 
4815 	for (i = 0; i < sbi->s_ndevs; i++) {
4816 		if (!bdev_is_zoned(FDEV(i).bdev))
4817 			continue;
4818 		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4819 				zone_blkaddr <= FDEV(i).end_blk))
4820 			return &FDEV(i);
4821 	}
4822 
4823 	return NULL;
4824 }
4825 
4826 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4827 			      void *data)
4828 {
4829 	memcpy(data, zone, sizeof(struct blk_zone));
4830 	return 0;
4831 }
4832 
4833 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4834 {
4835 	struct curseg_info *cs = CURSEG_I(sbi, type);
4836 	struct f2fs_dev_info *zbd;
4837 	struct blk_zone zone;
4838 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4839 	block_t cs_zone_block, wp_block;
4840 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4841 	sector_t zone_sector;
4842 	int err;
4843 
4844 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4845 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4846 
4847 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4848 	if (!zbd)
4849 		return 0;
4850 
4851 	/* report zone for the sector the curseg points to */
4852 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4853 		<< log_sectors_per_block;
4854 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4855 				  report_one_zone_cb, &zone);
4856 	if (err != 1) {
4857 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4858 			 zbd->path, err);
4859 		return err;
4860 	}
4861 
4862 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4863 		return 0;
4864 
4865 	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4866 	wp_segno = GET_SEGNO(sbi, wp_block);
4867 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4868 	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4869 
4870 	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4871 		wp_sector_off == 0)
4872 		return 0;
4873 
4874 	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
4875 		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
4876 		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
4877 
4878 	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
4879 		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
4880 
4881 	f2fs_allocate_new_section(sbi, type, true);
4882 
	/* check consistency of the zone the curseg pointed to */
4884 	if (check_zone_write_pointer(sbi, zbd, &zone))
4885 		return -EIO;
4886 
4887 	/* check newly assigned zone */
4888 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4889 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4890 
4891 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4892 	if (!zbd)
4893 		return 0;
4894 
4895 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4896 		<< log_sectors_per_block;
4897 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4898 				  report_one_zone_cb, &zone);
4899 	if (err != 1) {
4900 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4901 			 zbd->path, err);
4902 		return err;
4903 	}
4904 
4905 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4906 		return 0;
4907 
4908 	if (zone.wp != zone.start) {
4909 		f2fs_notice(sbi,
4910 			    "New zone for curseg[%d] is not yet discarded. "
4911 			    "Reset the zone: curseg[0x%x,0x%x]",
4912 			    type, cs->segno, cs->next_blkoff);
4913 		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
4914 				zone_sector >> log_sectors_per_block,
4915 				zone.len >> log_sectors_per_block);
4916 		if (err) {
4917 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4918 				 zbd->path, err);
4919 			return err;
4920 		}
4921 	}
4922 
4923 	return 0;
4924 }
4925 
4926 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4927 {
4928 	int i, ret;
4929 
4930 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4931 		ret = fix_curseg_write_pointer(sbi, i);
4932 		if (ret)
4933 			return ret;
4934 	}
4935 
4936 	return 0;
4937 }
4938 
4939 struct check_zone_write_pointer_args {
4940 	struct f2fs_sb_info *sbi;
4941 	struct f2fs_dev_info *fdev;
4942 };
4943 
4944 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
4945 				      void *data)
4946 {
4947 	struct check_zone_write_pointer_args *args;
4948 
4949 	args = (struct check_zone_write_pointer_args *)data;
4950 
4951 	return check_zone_write_pointer(args->sbi, args->fdev, zone);
4952 }
4953 
4954 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4955 {
4956 	int i, ret;
4957 	struct check_zone_write_pointer_args args;
4958 
4959 	for (i = 0; i < sbi->s_ndevs; i++) {
4960 		if (!bdev_is_zoned(FDEV(i).bdev))
4961 			continue;
4962 
4963 		args.sbi = sbi;
4964 		args.fdev = &FDEV(i);
4965 		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
4966 					  check_zone_write_pointer_cb, &args);
4967 		if (ret < 0)
4968 			return ret;
4969 	}
4970 
4971 	return 0;
4972 }
4973 
4974 static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
4975 						unsigned int dev_idx)
4976 {
4977 	if (!bdev_is_zoned(FDEV(dev_idx).bdev))
4978 		return true;
4979 	return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
4980 }
4981 
4982 /* Return the zone index in the given device */
4983 static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
4984 					int dev_idx)
4985 {
4986 	block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
4987 
4988 	return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
4989 						sbi->log_blocks_per_blkz;
4990 }
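
/*
 * Zone index arithmetic, with illustrative numbers (not taken from a
 * real geometry): for 4KB blocks and 256MB zones, log_blocks_per_blkz
 * is 16, so a section starting at block 0x30000 on a device whose
 * start_blk is 0x10000 lands in zone (0x30000 - 0x10000) >> 16 = 2.
 */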
4991 
/*
 * Return the number of usable segments in a section, based on the
 * corresponding zone's capacity. A zone is equal to a section.
 */
4996 static inline unsigned int f2fs_usable_zone_segs_in_sec(
4997 		struct f2fs_sb_info *sbi, unsigned int segno)
4998 {
4999 	unsigned int dev_idx, zone_idx, unusable_segs_in_sec;
5000 
5001 	dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
5002 	zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
5003 
5004 	/* Conventional zone's capacity is always equal to zone size */
5005 	if (is_conv_zone(sbi, zone_idx, dev_idx))
5006 		return sbi->segs_per_sec;
5007 
5008 	/*
5009 	 * If the zone_capacity_blocks array is NULL, then zone capacity
5010 	 * is equal to the zone size for all zones
5011 	 */
5012 	if (!FDEV(dev_idx).zone_capacity_blocks)
5013 		return sbi->segs_per_sec;
5014 
	/* Get the count of segments beyond the zone capacity */
5016 	unusable_segs_in_sec = (sbi->blocks_per_blkz -
5017 				FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
5018 				sbi->log_blocks_per_seg;
5019 	return sbi->segs_per_sec - unusable_segs_in_sec;
5020 }
5021 
5022 /*
5023  * Return the number of usable blocks in a segment. The number of blocks
5024  * returned is always equal to the number of blocks in a segment for
5025  * segments fully contained within a sequential zone capacity or a
5026  * conventional zone. For segments partially contained in a sequential
5027  * zone capacity, the number of usable blocks up to the zone capacity
5028  * is returned. 0 is returned in all other cases.
5029  */
5030 static inline unsigned int f2fs_usable_zone_blks_in_seg(
5031 			struct f2fs_sb_info *sbi, unsigned int segno)
5032 {
5033 	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
5034 	unsigned int zone_idx, dev_idx, secno;
5035 
5036 	secno = GET_SEC_FROM_SEG(sbi, segno);
5037 	seg_start = START_BLOCK(sbi, segno);
5038 	dev_idx = f2fs_target_device_index(sbi, seg_start);
5039 	zone_idx = get_zone_idx(sbi, secno, dev_idx);
5040 
	/*
	 * A conventional zone's capacity is always equal to its zone size,
	 * so the number of blocks per segment is unchanged.
	 */
5045 	if (is_conv_zone(sbi, zone_idx, dev_idx))
5046 		return sbi->blocks_per_seg;
5047 
5048 	if (!FDEV(dev_idx).zone_capacity_blocks)
5049 		return sbi->blocks_per_seg;
5050 
5051 	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5052 	sec_cap_blkaddr = sec_start_blkaddr +
5053 				FDEV(dev_idx).zone_capacity_blocks[zone_idx];
5054 
5055 	/*
5056 	 * If segment starts before zone capacity and spans beyond
5057 	 * zone capacity, then usable blocks are from seg start to
5058 	 * zone capacity. If the segment starts after the zone capacity,
5059 	 * then there are no usable blocks.
5060 	 */
5061 	if (seg_start >= sec_cap_blkaddr)
5062 		return 0;
5063 	if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
5064 		return sec_cap_blkaddr - seg_start;
5065 
5066 	return sbi->blocks_per_seg;
5067 }
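
/*
 * Worked example for the three cases above (hypothetical geometry):
 * with 512-block segments and a zone capacity of 1088 blocks, segments
 * 0 and 1 of the section lie fully below the capacity and get 512
 * usable blocks each; segment 2 starts at block 1024 and straddles the
 * capacity, getting 1088 - 1024 = 64 usable blocks; a segment starting
 * at block 1536 (>= 1088) would get 0.
 */
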
5068 #else
5069 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5070 {
5071 	return 0;
5072 }
5073 
5074 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5075 {
5076 	return 0;
5077 }
5078 
5079 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5080 							unsigned int segno)
5081 {
5082 	return 0;
5083 }
5084 
5085 static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
5086 							unsigned int segno)
5087 {
5088 	return 0;
5089 }
5090 #endif
5091 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5092 					unsigned int segno)
5093 {
5094 	if (f2fs_sb_has_blkzoned(sbi))
5095 		return f2fs_usable_zone_blks_in_seg(sbi, segno);
5096 
5097 	return sbi->blocks_per_seg;
5098 }
5099 
5100 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5101 					unsigned int segno)
5102 {
5103 	if (f2fs_sb_has_blkzoned(sbi))
5104 		return f2fs_usable_zone_segs_in_sec(sbi, segno);
5105 
5106 	return sbi->segs_per_sec;
5107 }
5108 
5109 /*
5110  * Update min, max modified time for cost-benefit GC algorithm
5111  */
5112 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5113 {
5114 	struct sit_info *sit_i = SIT_I(sbi);
5115 	unsigned int segno;
5116 
5117 	down_write(&sit_i->sentry_lock);
5118 
5119 	sit_i->min_mtime = ULLONG_MAX;
5120 
5121 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
5122 		unsigned int i;
5123 		unsigned long long mtime = 0;
5124 
5125 		for (i = 0; i < sbi->segs_per_sec; i++)
5126 			mtime += get_seg_entry(sbi, segno + i)->mtime;
5127 
5128 		mtime = div_u64(mtime, sbi->segs_per_sec);
5129 
5130 		if (sit_i->min_mtime > mtime)
5131 			sit_i->min_mtime = mtime;
5132 	}
5133 	sit_i->max_mtime = get_mtime(sbi, false);
5134 	sit_i->dirty_max_mtime = 0;
5135 	up_write(&sit_i->sentry_lock);
5136 }
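
/*
 * min_mtime above is taken over per-section averages. For instance,
 * with segs_per_sec == 4 and segment mtimes of 100, 200, 300 and 400
 * (illustrative numbers), the section contributes
 * div_u64(1000, 4) == 250 to the minimum.
 */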
5137 
5138 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5139 {
5140 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5141 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5142 	struct f2fs_sm_info *sm_info;
5143 	int err;
5144 
5145 	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5146 	if (!sm_info)
5147 		return -ENOMEM;
5148 
5149 	/* init sm info */
5150 	sbi->sm_info = sm_info;
5151 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5152 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5153 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5154 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5155 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5156 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5157 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
5158 	sm_info->rec_prefree_segments = sm_info->main_segments *
5159 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
5160 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
5161 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
5162 
5163 	if (!f2fs_lfs_mode(sbi))
5164 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
5165 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5166 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5167 	sm_info->min_seq_blocks = sbi->blocks_per_seg;
5168 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5169 	sm_info->min_ssr_sections = reserved_sections(sbi);
5170 
5171 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
5172 
5173 	init_f2fs_rwsem(&sm_info->curseg_lock);
5174 
5175 	if (!f2fs_readonly(sbi->sb)) {
5176 		err = f2fs_create_flush_cmd_control(sbi);
5177 		if (err)
5178 			return err;
5179 	}
5180 
5181 	err = create_discard_cmd_control(sbi);
5182 	if (err)
5183 		return err;
5184 
5185 	err = build_sit_info(sbi);
5186 	if (err)
5187 		return err;
5188 	err = build_free_segmap(sbi);
5189 	if (err)
5190 		return err;
5191 	err = build_curseg(sbi);
5192 	if (err)
5193 		return err;
5194 
5195 	/* reinit free segmap based on SIT */
5196 	err = build_sit_entries(sbi);
5197 	if (err)
5198 		return err;
5199 
5200 	init_free_segmap(sbi);
5201 	err = build_dirty_segmap(sbi);
5202 	if (err)
5203 		return err;
5204 
5205 	err = sanity_check_curseg(sbi);
5206 	if (err)
5207 		return err;
5208 
5209 	init_min_max_mtime(sbi);
5210 	return 0;
5211 }
5212 
5213 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5214 		enum dirty_type dirty_type)
5215 {
5216 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5217 
5218 	mutex_lock(&dirty_i->seglist_lock);
5219 	kvfree(dirty_i->dirty_segmap[dirty_type]);
5220 	dirty_i->nr_dirty[dirty_type] = 0;
5221 	mutex_unlock(&dirty_i->seglist_lock);
5222 }
5223 
5224 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5225 {
5226 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5227 
5228 	kvfree(dirty_i->victim_secmap);
5229 }
5230 
5231 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5232 {
5233 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5234 	int i;
5235 
5236 	if (!dirty_i)
5237 		return;
5238 
5239 	/* discard pre-free/dirty segments list */
5240 	for (i = 0; i < NR_DIRTY_TYPE; i++)
5241 		discard_dirty_segmap(sbi, i);
5242 
5243 	if (__is_large_section(sbi)) {
5244 		mutex_lock(&dirty_i->seglist_lock);
5245 		kvfree(dirty_i->dirty_secmap);
5246 		mutex_unlock(&dirty_i->seglist_lock);
5247 	}
5248 
5249 	destroy_victim_secmap(sbi);
5250 	SM_I(sbi)->dirty_info = NULL;
5251 	kfree(dirty_i);
5252 }
5253 
5254 static void destroy_curseg(struct f2fs_sb_info *sbi)
5255 {
5256 	struct curseg_info *array = SM_I(sbi)->curseg_array;
5257 	int i;
5258 
5259 	if (!array)
5260 		return;
5261 	SM_I(sbi)->curseg_array = NULL;
5262 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
5263 		kfree(array[i].sum_blk);
5264 		kfree(array[i].journal);
5265 	}
5266 	kfree(array);
5267 }
5268 
5269 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5270 {
5271 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5272 
5273 	if (!free_i)
5274 		return;
5275 	SM_I(sbi)->free_info = NULL;
5276 	kvfree(free_i->free_segmap);
5277 	kvfree(free_i->free_secmap);
5278 	kfree(free_i);
5279 }
5280 
5281 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5282 {
5283 	struct sit_info *sit_i = SIT_I(sbi);
5284 
5285 	if (!sit_i)
5286 		return;
5287 
5288 	if (sit_i->sentries)
5289 		kvfree(sit_i->bitmap);
5290 	kfree(sit_i->tmp_map);
5291 
5292 	kvfree(sit_i->sentries);
5293 	kvfree(sit_i->sec_entries);
5294 	kvfree(sit_i->dirty_sentries_bitmap);
5295 
5296 	SM_I(sbi)->sit_info = NULL;
5297 	kvfree(sit_i->sit_bitmap);
5298 #ifdef CONFIG_F2FS_CHECK_FS
5299 	kvfree(sit_i->sit_bitmap_mir);
5300 	kvfree(sit_i->invalid_segmap);
5301 #endif
5302 	kfree(sit_i);
5303 }
5304 
5305 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5306 {
5307 	struct f2fs_sm_info *sm_info = SM_I(sbi);
5308 
5309 	if (!sm_info)
5310 		return;
5311 	f2fs_destroy_flush_cmd_control(sbi, true);
5312 	destroy_discard_cmd_control(sbi);
5313 	destroy_dirty_segmap(sbi);
5314 	destroy_curseg(sbi);
5315 	destroy_free_segmap(sbi);
5316 	destroy_sit_info(sbi);
5317 	sbi->sm_info = NULL;
5318 	kfree(sm_info);
5319 }
5320 
5321 int __init f2fs_create_segment_manager_caches(void)
5322 {
5323 	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5324 			sizeof(struct discard_entry));
5325 	if (!discard_entry_slab)
5326 		goto fail;
5327 
5328 	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5329 			sizeof(struct discard_cmd));
5330 	if (!discard_cmd_slab)
5331 		goto destroy_discard_entry;
5332 
5333 	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5334 			sizeof(struct sit_entry_set));
5335 	if (!sit_entry_set_slab)
5336 		goto destroy_discard_cmd;
5337 
5338 	inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
5339 			sizeof(struct inmem_pages));
5340 	if (!inmem_entry_slab)
5341 		goto destroy_sit_entry_set;
5342 	return 0;
5343 
5344 destroy_sit_entry_set:
5345 	kmem_cache_destroy(sit_entry_set_slab);
5346 destroy_discard_cmd:
5347 	kmem_cache_destroy(discard_cmd_slab);
5348 destroy_discard_entry:
5349 	kmem_cache_destroy(discard_entry_slab);
5350 fail:
5351 	return -ENOMEM;
5352 }
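
/*
 * The unwind labels above release the caches in reverse order of
 * creation, so a failure at any step frees exactly what was set up
 * before it; this is the usual kernel goto-unwind error handling pattern.
 */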
5353 
5354 void f2fs_destroy_segment_manager_caches(void)
5355 {
5356 	kmem_cache_destroy(sit_entry_set_slab);
5357 	kmem_cache_destroy(discard_cmd_slab);
5358 	kmem_cache_destroy(discard_entry_slab);
5359 	kmem_cache_destroy(inmem_entry_slab);
5360 }
5361