1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/segment.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/sched/mm.h>
13 #include <linux/prefetch.h>
14 #include <linux/kthread.h>
15 #include <linux/swap.h>
16 #include <linux/timer.h>
17 #include <linux/freezer.h>
18 #include <linux/sched/signal.h>
19 #include <linux/random.h>
20 
21 #include "f2fs.h"
22 #include "segment.h"
23 #include "node.h"
24 #include "gc.h"
25 #include "iostat.h"
26 #include <trace/events/f2fs.h>
27 
28 #define __reverse_ffz(x) __reverse_ffs(~(x))
29 
30 static struct kmem_cache *discard_entry_slab;
31 static struct kmem_cache *discard_cmd_slab;
32 static struct kmem_cache *sit_entry_set_slab;
33 static struct kmem_cache *inmem_entry_slab;
34 
35 static unsigned long __reverse_ulong(unsigned char *str)
36 {
37 	unsigned long tmp = 0;
38 	int shift = 24, idx = 0;
39 
40 #if BITS_PER_LONG == 64
41 	shift = 56;
42 #endif
43 	while (shift >= 0) {
44 		tmp |= (unsigned long)str[idx++] << shift;
45 		shift -= BITS_PER_BYTE;
46 	}
47 	return tmp;
48 }
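
/*
 * Illustrative note (added commentary, not from the original source):
 * on a 64-bit build __reverse_ulong() loads the bytes big-endian, so
 * the byte sequence {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
 * becomes 0x8000000000000001UL. This lets the search helpers below
 * scan an f2fs bitmap, whose bytes keep bit 0 in the MSB, using
 * ordinary word-wide operations.
 */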
49 
50 /*
51  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
52  * MSB and LSB are reversed in a byte by f2fs_set_bit.
53  */
54 static inline unsigned long __reverse_ffs(unsigned long word)
55 {
56 	int num = 0;
57 
58 #if BITS_PER_LONG == 64
59 	if ((word & 0xffffffff00000000UL) == 0)
60 		num += 32;
61 	else
62 		word >>= 32;
63 #endif
64 	if ((word & 0xffff0000) == 0)
65 		num += 16;
66 	else
67 		word >>= 16;
68 
69 	if ((word & 0xff00) == 0)
70 		num += 8;
71 	else
72 		word >>= 8;
73 
74 	if ((word & 0xf0) == 0)
75 		num += 4;
76 	else
77 		word >>= 4;
78 
79 	if ((word & 0xc) == 0)
80 		num += 2;
81 	else
82 		word >>= 2;
83 
84 	if ((word & 0x2) == 0)
85 		num += 1;
86 	return num;
87 }
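
/*
 * Worked example (added commentary): __reverse_ffs(0x8000000000000000UL)
 * returns 0, since the set bit survives every "is the upper half zero?"
 * test, while __reverse_ffs(0x1UL) returns 63 on 64-bit (31 on 32-bit):
 * each failed test adds half of the remaining width to num.
 */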
88 
89 /*
90  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
91  * f2fs_set_bit makes MSB and LSB reversed in a byte.
92  * @size must be an integral multiple of BITS_PER_LONG.
93  * Example:
94  *                             MSB <--> LSB
95  *   f2fs_set_bit(0, bitmap) => 1000 0000
96  *   f2fs_set_bit(7, bitmap) => 0000 0001
97  */
98 static unsigned long __find_rev_next_bit(const unsigned long *addr,
99 			unsigned long size, unsigned long offset)
100 {
101 	const unsigned long *p = addr + BIT_WORD(offset);
102 	unsigned long result = size;
103 	unsigned long tmp;
104 
105 	if (offset >= size)
106 		return size;
107 
108 	size -= (offset & ~(BITS_PER_LONG - 1));
109 	offset %= BITS_PER_LONG;
110 
111 	while (1) {
112 		if (*p == 0)
113 			goto pass;
114 
115 		tmp = __reverse_ulong((unsigned char *)p);
116 
117 		tmp &= ~0UL >> offset;
118 		if (size < BITS_PER_LONG)
119 			tmp &= (~0UL << (BITS_PER_LONG - size));
120 		if (tmp)
121 			goto found;
122 pass:
123 		if (size <= BITS_PER_LONG)
124 			break;
125 		size -= BITS_PER_LONG;
126 		offset = 0;
127 		p++;
128 	}
129 	return result;
130 found:
131 	return result - size + __reverse_ffs(tmp);
132 }
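
/*
 * Example (added commentary): if only f2fs_set_bit(9, bitmap) was
 * called on an 8-byte bitmap (so bitmap[1] == 0x40), then
 * __find_rev_next_bit(addr, 64, 0) returns 9: the reversed word is
 * 0x0040000000000000UL, the offset/size masks change nothing, and
 * result - size + __reverse_ffs(tmp) == 64 - 64 + 9.
 */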
133 
134 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
135 			unsigned long size, unsigned long offset)
136 {
137 	const unsigned long *p = addr + BIT_WORD(offset);
138 	unsigned long result = size;
139 	unsigned long tmp;
140 
141 	if (offset >= size)
142 		return size;
143 
144 	size -= (offset & ~(BITS_PER_LONG - 1));
145 	offset %= BITS_PER_LONG;
146 
147 	while (1) {
148 		if (*p == ~0UL)
149 			goto pass;
150 
151 		tmp = __reverse_ulong((unsigned char *)p);
152 
153 		if (offset)
154 			tmp |= ~0UL << (BITS_PER_LONG - offset);
155 		if (size < BITS_PER_LONG)
156 			tmp |= ~0UL >> size;
157 		if (tmp != ~0UL)
158 			goto found;
159 pass:
160 		if (size <= BITS_PER_LONG)
161 			break;
162 		size -= BITS_PER_LONG;
163 		offset = 0;
164 		p++;
165 	}
166 	return result;
167 found:
168 	return result - size + __reverse_ffz(tmp);
169 }
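
/*
 * Example (added commentary): for a fully set 64-bit map where only
 * f2fs_clear_bit(3, bitmap) was applied, __find_rev_next_zero_bit(addr,
 * 64, 0) returns 3; this mirrors __find_rev_next_bit() with set/clear
 * roles swapped, since __reverse_ffz() is __reverse_ffs() of the
 * complement.
 */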
170 
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
172 {
173 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
174 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
175 	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
176 
177 	if (f2fs_lfs_mode(sbi))
178 		return false;
179 	if (sbi->gc_mode == GC_URGENT_HIGH)
180 		return true;
181 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
182 		return true;
183 
184 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
185 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
186 }
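
/*
 * Illustrative reading of the check above (added commentary): with,
 * say, 5 dirty node sections, 2 dirty dentry sections, 1 dirty imeta
 * section, min_ssr_sections == 2 and 6 reserved sections, SSR kicks in
 * once free_sections(sbi) drops to 5 + 2 * 2 + 1 + 2 + 6 = 18 or less.
 */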
187 
188 void f2fs_register_inmem_page(struct inode *inode, struct page *page)
189 {
190 	struct inmem_pages *new;
191 
192 	set_page_private_atomic(page);
193 
194 	new = f2fs_kmem_cache_alloc(inmem_entry_slab,
195 					GFP_NOFS, true, NULL);
196 
197 	/* add the atomic page into the list */
198 	new->page = page;
199 	INIT_LIST_HEAD(&new->list);
200 
201 	/* increase reference count with clean state */
202 	get_page(page);
203 	mutex_lock(&F2FS_I(inode)->inmem_lock);
204 	list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
205 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
206 	mutex_unlock(&F2FS_I(inode)->inmem_lock);
207 
208 	trace_f2fs_register_inmem_page(page, INMEM);
209 }
210 
211 static int __revoke_inmem_pages(struct inode *inode,
212 				struct list_head *head, bool drop, bool recover,
213 				bool trylock)
214 {
215 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
216 	struct inmem_pages *cur, *tmp;
217 	int err = 0;
218 
219 	list_for_each_entry_safe(cur, tmp, head, list) {
220 		struct page *page = cur->page;
221 
222 		if (drop)
223 			trace_f2fs_commit_inmem_page(page, INMEM_DROP);
224 
225 		if (trylock) {
226 			/*
227 			 * to avoid a deadlock between the page lock and
228 			 * inmem_lock.
229 			 */
230 			if (!trylock_page(page))
231 				continue;
232 		} else {
233 			lock_page(page);
234 		}
235 
236 		f2fs_wait_on_page_writeback(page, DATA, true, true);
237 
238 		if (recover) {
239 			struct dnode_of_data dn;
240 			struct node_info ni;
241 
242 			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
243 retry:
244 			set_new_dnode(&dn, inode, NULL, NULL, 0);
245 			err = f2fs_get_dnode_of_data(&dn, page->index,
246 								LOOKUP_NODE);
247 			if (err) {
248 				if (err == -ENOMEM) {
249 					memalloc_retry_wait(GFP_NOFS);
250 					goto retry;
251 				}
252 				err = -EAGAIN;
253 				goto next;
254 			}
255 
256 			err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
257 			if (err) {
258 				f2fs_put_dnode(&dn);
259 				return err;
260 			}
261 
262 			if (cur->old_addr == NEW_ADDR) {
263 				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
264 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
265 			} else
266 				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
267 					cur->old_addr, ni.version, true, true);
268 			f2fs_put_dnode(&dn);
269 		}
270 next:
271 		/* we don't need to invalidate this in the successful case */
272 		if (drop || recover) {
273 			ClearPageUptodate(page);
274 			clear_page_private_gcing(page);
275 		}
276 		detach_page_private(page);
277 		set_page_private(page, 0);
278 		f2fs_put_page(page, 1);
279 
280 		list_del(&cur->list);
281 		kmem_cache_free(inmem_entry_slab, cur);
282 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
283 	}
284 	return err;
285 }
286 
287 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
288 {
289 	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
290 	struct inode *inode;
291 	struct f2fs_inode_info *fi;
292 	unsigned int count = sbi->atomic_files;
293 	unsigned int looped = 0;
294 next:
295 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
296 	if (list_empty(head)) {
297 		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
298 		return;
299 	}
300 	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
301 	inode = igrab(&fi->vfs_inode);
302 	if (inode)
303 		list_move_tail(&fi->inmem_ilist, head);
304 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
305 
306 	if (inode) {
307 		if (gc_failure) {
308 			if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
309 				goto skip;
310 		}
311 		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
312 		f2fs_drop_inmem_pages(inode);
313 skip:
314 		iput(inode);
315 	}
316 	f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
317 	if (gc_failure) {
318 		if (++looped >= count)
319 			return;
320 	}
321 	goto next;
322 }
323 
324 void f2fs_drop_inmem_pages(struct inode *inode)
325 {
326 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
327 	struct f2fs_inode_info *fi = F2FS_I(inode);
328 
329 	do {
330 		mutex_lock(&fi->inmem_lock);
331 		if (list_empty(&fi->inmem_pages)) {
332 			fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
333 
334 			spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
335 			if (!list_empty(&fi->inmem_ilist))
336 				list_del_init(&fi->inmem_ilist);
337 			if (f2fs_is_atomic_file(inode)) {
338 				clear_inode_flag(inode, FI_ATOMIC_FILE);
339 				sbi->atomic_files--;
340 			}
341 			spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
342 
343 			mutex_unlock(&fi->inmem_lock);
344 			break;
345 		}
346 		__revoke_inmem_pages(inode, &fi->inmem_pages,
347 						true, false, true);
348 		mutex_unlock(&fi->inmem_lock);
349 	} while (1);
350 }
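
/*
 * Design note (added commentary): the loop above retries with
 * trylock_page() (trylock == true in __revoke_inmem_pages()) so the
 * page lock is never waited on while inmem_lock is held; entries whose
 * pages could not be trylocked stay on the list and are revisited on
 * the next pass until the list drains.
 */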
351 
352 void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
353 {
354 	struct f2fs_inode_info *fi = F2FS_I(inode);
355 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
356 	struct list_head *head = &fi->inmem_pages;
357 	struct inmem_pages *cur = NULL;
358 
359 	f2fs_bug_on(sbi, !page_private_atomic(page));
360 
361 	mutex_lock(&fi->inmem_lock);
362 	list_for_each_entry(cur, head, list) {
363 		if (cur->page == page)
364 			break;
365 	}
366 
367 	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
368 	list_del(&cur->list);
369 	mutex_unlock(&fi->inmem_lock);
370 
371 	dec_page_count(sbi, F2FS_INMEM_PAGES);
372 	kmem_cache_free(inmem_entry_slab, cur);
373 
374 	ClearPageUptodate(page);
375 	clear_page_private_atomic(page);
376 	f2fs_put_page(page, 0);
377 
378 	detach_page_private(page);
379 	set_page_private(page, 0);
380 
381 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
382 }
383 
384 static int __f2fs_commit_inmem_pages(struct inode *inode)
385 {
386 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
387 	struct f2fs_inode_info *fi = F2FS_I(inode);
388 	struct inmem_pages *cur, *tmp;
389 	struct f2fs_io_info fio = {
390 		.sbi = sbi,
391 		.ino = inode->i_ino,
392 		.type = DATA,
393 		.op = REQ_OP_WRITE,
394 		.op_flags = REQ_SYNC | REQ_PRIO,
395 		.io_type = FS_DATA_IO,
396 	};
397 	struct list_head revoke_list;
398 	bool submit_bio = false;
399 	int err = 0;
400 
401 	INIT_LIST_HEAD(&revoke_list);
402 
403 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
404 		struct page *page = cur->page;
405 
406 		lock_page(page);
407 		if (page->mapping == inode->i_mapping) {
408 			trace_f2fs_commit_inmem_page(page, INMEM);
409 
410 			f2fs_wait_on_page_writeback(page, DATA, true, true);
411 
412 			set_page_dirty(page);
413 			if (clear_page_dirty_for_io(page)) {
414 				inode_dec_dirty_pages(inode);
415 				f2fs_remove_dirty_inode(inode);
416 			}
417 retry:
418 			fio.page = page;
419 			fio.old_blkaddr = NULL_ADDR;
420 			fio.encrypted_page = NULL;
421 			fio.need_lock = LOCK_DONE;
422 			err = f2fs_do_write_data_page(&fio);
423 			if (err) {
424 				if (err == -ENOMEM) {
425 					memalloc_retry_wait(GFP_NOFS);
426 					goto retry;
427 				}
428 				unlock_page(page);
429 				break;
430 			}
431 			/* record old blkaddr for revoking */
432 			cur->old_addr = fio.old_blkaddr;
433 			submit_bio = true;
434 		}
435 		unlock_page(page);
436 		list_move_tail(&cur->list, &revoke_list);
437 	}
438 
439 	if (submit_bio)
440 		f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
441 
442 	if (err) {
443 		/*
444 		 * try to revoke all committed pages; this could still fail,
445 		 * e.g. due to lack of memory. If it does, EAGAIN is returned,
446 		 * meaning the transaction has lost its integrity, and the
447 		 * caller should use a journal to recover, or rewrite and
448 		 * commit the last transaction. For any other error number,
449 		 * the revoke was completed by the filesystem itself.
450 		 */
451 		err = __revoke_inmem_pages(inode, &revoke_list,
452 						false, true, false);
453 
454 		/* drop all uncommitted pages */
455 		__revoke_inmem_pages(inode, &fi->inmem_pages,
456 						true, false, false);
457 	} else {
458 		__revoke_inmem_pages(inode, &revoke_list,
459 						false, false, false);
460 	}
461 
462 	return err;
463 }
464 
465 int f2fs_commit_inmem_pages(struct inode *inode)
466 {
467 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
468 	struct f2fs_inode_info *fi = F2FS_I(inode);
469 	int err;
470 
471 	f2fs_balance_fs(sbi, true);
472 
473 	down_write(&fi->i_gc_rwsem[WRITE]);
474 
475 	f2fs_lock_op(sbi);
476 	set_inode_flag(inode, FI_ATOMIC_COMMIT);
477 
478 	mutex_lock(&fi->inmem_lock);
479 	err = __f2fs_commit_inmem_pages(inode);
480 	mutex_unlock(&fi->inmem_lock);
481 
482 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
483 
484 	f2fs_unlock_op(sbi);
485 	up_write(&fi->i_gc_rwsem[WRITE]);
486 
487 	return err;
488 }
489 
490 /*
491  * This function balances dirty node and dentry pages.
492  * In addition, it controls garbage collection.
493  */
494 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
495 {
496 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
497 		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
498 		f2fs_stop_checkpoint(sbi, false);
499 	}
500 
501 	/* f2fs_balance_fs_bg() may have been left pending */
502 	if (need && excess_cached_nats(sbi))
503 		f2fs_balance_fs_bg(sbi, false);
504 
505 	if (!f2fs_is_checkpoint_ready(sbi))
506 		return;
507 
508 	/*
509 	 * We should do GC, or end up with a checkpoint, if there are too many
510 	 * dirty dir/node pages and not enough free segments.
511 	 */
512 	if (has_not_enough_free_secs(sbi, 0, 0)) {
513 		if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
514 					sbi->gc_thread->f2fs_gc_task) {
515 			DEFINE_WAIT(wait);
516 
517 			prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
518 						TASK_UNINTERRUPTIBLE);
519 			wake_up(&sbi->gc_thread->gc_wait_queue_head);
520 			io_schedule();
521 			finish_wait(&sbi->gc_thread->fggc_wq, &wait);
522 		} else {
523 			down_write(&sbi->gc_lock);
524 			f2fs_gc(sbi, false, false, false, NULL_SEGNO);
525 		}
526 	}
527 }
528 
529 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
530 {
531 	int factor = rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
532 	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
533 	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
534 	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
535 	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
536 	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
537 	unsigned int threshold = sbi->blocks_per_seg * factor *
538 					DEFAULT_DIRTY_THRESHOLD;
539 	unsigned int global_threshold = threshold * 3 / 2;
540 
541 	if (dents >= threshold || qdata >= threshold ||
542 		nodes >= threshold || meta >= threshold ||
543 		imeta >= threshold)
544 		return true;
545 	return dents + qdata + nodes + meta + imeta >  global_threshold;
546 }
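
/*
 * Worked example (added commentary, values assumed): with 512 blocks
 * per segment, DEFAULT_DIRTY_THRESHOLD == 4 and cp_rwsem unlocked
 * (factor == 2), each dirty-page class is capped at 512 * 2 * 4 = 4096
 * pages and their combined total at 4096 * 3 / 2 = 6144 pages before
 * a sync is triggered.
 */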
547 
548 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
549 {
550 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
551 		return;
552 
553 	/* try to shrink the extent cache when there is not enough memory */
554 	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
555 		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
556 
557 	/* check the # of cached NAT entries */
558 	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
559 		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
560 
561 	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
562 		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
563 	else
564 		f2fs_build_free_nids(sbi, false, false);
565 
566 	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
567 		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
568 		goto do_sync;
569 
570 	/* there is in-flight background IO, or a foreground operation ran recently */
571 	if (is_inflight_io(sbi, REQ_TIME) ||
572 		(!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem)))
573 		return;
574 
575 	/* the periodic checkpoint timeout threshold has been exceeded */
576 	if (f2fs_time_over(sbi, CP_TIME))
577 		goto do_sync;
578 
579 	/* checkpoint is the only way to shrink partial cached entries */
580 	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
581 		f2fs_available_free_memory(sbi, INO_ENTRIES))
582 		return;
583 
584 do_sync:
585 	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
586 		struct blk_plug plug;
587 
588 		mutex_lock(&sbi->flush_lock);
589 
590 		blk_start_plug(&plug);
591 		f2fs_sync_dirty_inodes(sbi, FILE_INODE);
592 		blk_finish_plug(&plug);
593 
594 		mutex_unlock(&sbi->flush_lock);
595 	}
596 	f2fs_sync_fs(sbi->sb, true);
597 	stat_inc_bg_cp_count(sbi->stat_info);
598 }
599 
600 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
601 				struct block_device *bdev)
602 {
603 	int ret = blkdev_issue_flush(bdev);
604 
605 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
606 				test_opt(sbi, FLUSH_MERGE), ret);
607 	return ret;
608 }
609 
610 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
611 {
612 	int ret = 0;
613 	int i;
614 
615 	if (!f2fs_is_multi_device(sbi))
616 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
617 
618 	for (i = 0; i < sbi->s_ndevs; i++) {
619 		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
620 			continue;
621 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
622 		if (ret)
623 			break;
624 	}
625 	return ret;
626 }
627 
628 static int issue_flush_thread(void *data)
629 {
630 	struct f2fs_sb_info *sbi = data;
631 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
632 	wait_queue_head_t *q = &fcc->flush_wait_queue;
633 repeat:
634 	if (kthread_should_stop())
635 		return 0;
636 
637 	if (!llist_empty(&fcc->issue_list)) {
638 		struct flush_cmd *cmd, *next;
639 		int ret;
640 
641 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
642 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
643 
644 		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
645 
646 		ret = submit_flush_wait(sbi, cmd->ino);
647 		atomic_inc(&fcc->issued_flush);
648 
649 		llist_for_each_entry_safe(cmd, next,
650 					  fcc->dispatch_list, llnode) {
651 			cmd->ret = ret;
652 			complete(&cmd->wait);
653 		}
654 		fcc->dispatch_list = NULL;
655 	}
656 
657 	wait_event_interruptible(*q,
658 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
659 	goto repeat;
660 }
661 
662 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
663 {
664 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
665 	struct flush_cmd cmd;
666 	int ret;
667 
668 	if (test_opt(sbi, NOBARRIER))
669 		return 0;
670 
671 	if (!test_opt(sbi, FLUSH_MERGE)) {
672 		atomic_inc(&fcc->queued_flush);
673 		ret = submit_flush_wait(sbi, ino);
674 		atomic_dec(&fcc->queued_flush);
675 		atomic_inc(&fcc->issued_flush);
676 		return ret;
677 	}
678 
679 	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
680 	    f2fs_is_multi_device(sbi)) {
681 		ret = submit_flush_wait(sbi, ino);
682 		atomic_dec(&fcc->queued_flush);
683 
684 		atomic_inc(&fcc->issued_flush);
685 		return ret;
686 	}
687 
688 	cmd.ino = ino;
689 	init_completion(&cmd.wait);
690 
691 	llist_add(&cmd.llnode, &fcc->issue_list);
692 
693 	/*
694 	 * make the issue_list update visible before waking up the issue_flush
695 	 * thread; this smp_mb() pairs with another barrier in ___wait_event().
696 	 * See the comments of waitqueue_active() for details.
697 	 */
698 	smp_mb();
699 
700 	if (waitqueue_active(&fcc->flush_wait_queue))
701 		wake_up(&fcc->flush_wait_queue);
702 
703 	if (fcc->f2fs_issue_flush) {
704 		wait_for_completion(&cmd.wait);
705 		atomic_dec(&fcc->queued_flush);
706 	} else {
707 		struct llist_node *list;
708 
709 		list = llist_del_all(&fcc->issue_list);
710 		if (!list) {
711 			wait_for_completion(&cmd.wait);
712 			atomic_dec(&fcc->queued_flush);
713 		} else {
714 			struct flush_cmd *tmp, *next;
715 
716 			ret = submit_flush_wait(sbi, ino);
717 
718 			llist_for_each_entry_safe(tmp, next, list, llnode) {
719 				if (tmp == &cmd) {
720 					cmd.ret = ret;
721 					atomic_dec(&fcc->queued_flush);
722 					continue;
723 				}
724 				tmp->ret = ret;
725 				complete(&tmp->wait);
726 			}
727 		}
728 	}
729 
730 	return cmd.ret;
731 }
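
/*
 * Design note (added commentary): with FLUSH_MERGE, only the first
 * waiter (queued_flush == 1) or a multi-device setup issues the flush
 * directly; later arrivals park their flush_cmd on issue_list so one
 * device flush can complete many waiters at once. The llist_del_all()
 * fallback lets a waiter drain the list itself if the issue_flush
 * thread has already been torn down.
 */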
732 
733 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
734 {
735 	dev_t dev = sbi->sb->s_bdev->bd_dev;
736 	struct flush_cmd_control *fcc;
737 	int err = 0;
738 
739 	if (SM_I(sbi)->fcc_info) {
740 		fcc = SM_I(sbi)->fcc_info;
741 		if (fcc->f2fs_issue_flush)
742 			return err;
743 		goto init_thread;
744 	}
745 
746 	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
747 	if (!fcc)
748 		return -ENOMEM;
749 	atomic_set(&fcc->issued_flush, 0);
750 	atomic_set(&fcc->queued_flush, 0);
751 	init_waitqueue_head(&fcc->flush_wait_queue);
752 	init_llist_head(&fcc->issue_list);
753 	SM_I(sbi)->fcc_info = fcc;
754 	if (!test_opt(sbi, FLUSH_MERGE))
755 		return err;
756 
757 init_thread:
758 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
759 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
760 	if (IS_ERR(fcc->f2fs_issue_flush)) {
761 		err = PTR_ERR(fcc->f2fs_issue_flush);
762 		kfree(fcc);
763 		SM_I(sbi)->fcc_info = NULL;
764 		return err;
765 	}
766 
767 	return err;
768 }
769 
770 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
771 {
772 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
773 
774 	if (fcc && fcc->f2fs_issue_flush) {
775 		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
776 
777 		fcc->f2fs_issue_flush = NULL;
778 		kthread_stop(flush_thread);
779 	}
780 	if (free) {
781 		kfree(fcc);
782 		SM_I(sbi)->fcc_info = NULL;
783 	}
784 }
785 
786 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
787 {
788 	int ret = 0, i;
789 
790 	if (!f2fs_is_multi_device(sbi))
791 		return 0;
792 
793 	if (test_opt(sbi, NOBARRIER))
794 		return 0;
795 
796 	for (i = 1; i < sbi->s_ndevs; i++) {
797 		int count = DEFAULT_RETRY_IO_COUNT;
798 
799 		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
800 			continue;
801 
802 		do {
803 			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
804 			if (ret)
805 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
806 		} while (ret && --count);
807 
808 		if (ret) {
809 			f2fs_stop_checkpoint(sbi, false);
810 			break;
811 		}
812 
813 		spin_lock(&sbi->dev_lock);
814 		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
815 		spin_unlock(&sbi->dev_lock);
816 	}
817 
818 	return ret;
819 }
820 
821 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
822 		enum dirty_type dirty_type)
823 {
824 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
825 
826 	/* need not be added */
827 	if (IS_CURSEG(sbi, segno))
828 		return;
829 
830 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
831 		dirty_i->nr_dirty[dirty_type]++;
832 
833 	if (dirty_type == DIRTY) {
834 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
835 		enum dirty_type t = sentry->type;
836 
837 		if (unlikely(t >= DIRTY)) {
838 			f2fs_bug_on(sbi, 1);
839 			return;
840 		}
841 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
842 			dirty_i->nr_dirty[t]++;
843 
844 		if (__is_large_section(sbi)) {
845 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
846 			block_t valid_blocks =
847 				get_valid_blocks(sbi, segno, true);
848 
849 			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
850 					valid_blocks == BLKS_PER_SEC(sbi)));
851 
852 			if (!IS_CURSEC(sbi, secno))
853 				set_bit(secno, dirty_i->dirty_secmap);
854 		}
855 	}
856 }
857 
858 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
859 		enum dirty_type dirty_type)
860 {
861 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
862 	block_t valid_blocks;
863 
864 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
865 		dirty_i->nr_dirty[dirty_type]--;
866 
867 	if (dirty_type == DIRTY) {
868 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
869 		enum dirty_type t = sentry->type;
870 
871 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
872 			dirty_i->nr_dirty[t]--;
873 
874 		valid_blocks = get_valid_blocks(sbi, segno, true);
875 		if (valid_blocks == 0) {
876 			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
877 						dirty_i->victim_secmap);
878 #ifdef CONFIG_F2FS_CHECK_FS
879 			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
880 #endif
881 		}
882 		if (__is_large_section(sbi)) {
883 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
884 
885 			if (!valid_blocks ||
886 					valid_blocks == BLKS_PER_SEC(sbi)) {
887 				clear_bit(secno, dirty_i->dirty_secmap);
888 				return;
889 			}
890 
891 			if (!IS_CURSEC(sbi, secno))
892 				set_bit(secno, dirty_i->dirty_secmap);
893 		}
894 	}
895 }
896 
897 /*
898  * This should not fail with an error such as -ENOMEM.
899  * Adding a dirty entry into the seglist is not a critical operation.
900  * If a given segment is one of the current working segments, it won't be added.
901  */
902 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
903 {
904 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
905 	unsigned short valid_blocks, ckpt_valid_blocks;
906 	unsigned int usable_blocks;
907 
908 	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
909 		return;
910 
911 	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
912 	mutex_lock(&dirty_i->seglist_lock);
913 
914 	valid_blocks = get_valid_blocks(sbi, segno, false);
915 	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
916 
917 	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
918 		ckpt_valid_blocks == usable_blocks)) {
919 		__locate_dirty_segment(sbi, segno, PRE);
920 		__remove_dirty_segment(sbi, segno, DIRTY);
921 	} else if (valid_blocks < usable_blocks) {
922 		__locate_dirty_segment(sbi, segno, DIRTY);
923 	} else {
924 		/* Recovery routine with SSR needs this */
925 		__remove_dirty_segment(sbi, segno, DIRTY);
926 	}
927 
928 	mutex_unlock(&dirty_i->seglist_lock);
929 }
930 
931 /* This moves currently empty dirty segments to prefree; seglist_lock is taken internally. */
932 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
933 {
934 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
935 	unsigned int segno;
936 
937 	mutex_lock(&dirty_i->seglist_lock);
938 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
939 		if (get_valid_blocks(sbi, segno, false))
940 			continue;
941 		if (IS_CURSEG(sbi, segno))
942 			continue;
943 		__locate_dirty_segment(sbi, segno, PRE);
944 		__remove_dirty_segment(sbi, segno, DIRTY);
945 	}
946 	mutex_unlock(&dirty_i->seglist_lock);
947 }
948 
949 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
950 {
951 	int ovp_hole_segs =
952 		(overprovision_segments(sbi) - reserved_segments(sbi));
953 	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
954 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
955 	block_t holes[2] = {0, 0};	/* DATA and NODE */
956 	block_t unusable;
957 	struct seg_entry *se;
958 	unsigned int segno;
959 
960 	mutex_lock(&dirty_i->seglist_lock);
961 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
962 		se = get_seg_entry(sbi, segno);
963 		if (IS_NODESEG(se->type))
964 			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
965 							se->valid_blocks;
966 		else
967 			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
968 							se->valid_blocks;
969 	}
970 	mutex_unlock(&dirty_i->seglist_lock);
971 
972 	unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
973 	if (unusable > ovp_holes)
974 		return unusable - ovp_holes;
975 	return 0;
976 }
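
/*
 * Worked example (added commentary, values assumed): with
 * ovp_hole_segs == 10 and 512 blocks per segment, ovp_holes == 5120;
 * if dirty segments leave 7000 unwritable DATA holes and 3000 NODE
 * holes, the larger class wins and 7000 - 5120 = 1880 blocks are
 * reported as unusable.
 */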
977 
978 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
979 {
980 	int ovp_hole_segs =
981 		(overprovision_segments(sbi) - reserved_segments(sbi));
982 	if (unusable > F2FS_OPTION(sbi).unusable_cap)
983 		return -EAGAIN;
984 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
985 		dirty_segments(sbi) > ovp_hole_segs)
986 		return -EAGAIN;
987 	return 0;
988 }
989 
990 /* This is only used when SBI_CP_DISABLED is set */
991 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
992 {
993 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
994 	unsigned int segno = 0;
995 
996 	mutex_lock(&dirty_i->seglist_lock);
997 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
998 		if (get_valid_blocks(sbi, segno, false))
999 			continue;
1000 		if (get_ckpt_valid_blocks(sbi, segno, false))
1001 			continue;
1002 		mutex_unlock(&dirty_i->seglist_lock);
1003 		return segno;
1004 	}
1005 	mutex_unlock(&dirty_i->seglist_lock);
1006 	return NULL_SEGNO;
1007 }
1008 
1009 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
1010 		struct block_device *bdev, block_t lstart,
1011 		block_t start, block_t len)
1012 {
1013 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1014 	struct list_head *pend_list;
1015 	struct discard_cmd *dc;
1016 
1017 	f2fs_bug_on(sbi, !len);
1018 
1019 	pend_list = &dcc->pend_list[plist_idx(len)];
1020 
1021 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
1022 	INIT_LIST_HEAD(&dc->list);
1023 	dc->bdev = bdev;
1024 	dc->lstart = lstart;
1025 	dc->start = start;
1026 	dc->len = len;
1027 	dc->ref = 0;
1028 	dc->state = D_PREP;
1029 	dc->queued = 0;
1030 	dc->error = 0;
1031 	init_completion(&dc->wait);
1032 	list_add_tail(&dc->list, pend_list);
1033 	spin_lock_init(&dc->lock);
1034 	dc->bio_ref = 0;
1035 	atomic_inc(&dcc->discard_cmd_cnt);
1036 	dcc->undiscard_blks += len;
1037 
1038 	return dc;
1039 }
1040 
1041 static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
1042 				struct block_device *bdev, block_t lstart,
1043 				block_t start, block_t len,
1044 				struct rb_node *parent, struct rb_node **p,
1045 				bool leftmost)
1046 {
1047 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1048 	struct discard_cmd *dc;
1049 
1050 	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1051 
1052 	rb_link_node(&dc->rb_node, parent, p);
1053 	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
1054 
1055 	return dc;
1056 }
1057 
1058 static void __detach_discard_cmd(struct discard_cmd_control *dcc,
1059 							struct discard_cmd *dc)
1060 {
1061 	if (dc->state == D_DONE)
1062 		atomic_sub(dc->queued, &dcc->queued_discard);
1063 
1064 	list_del(&dc->list);
1065 	rb_erase_cached(&dc->rb_node, &dcc->root);
1066 	dcc->undiscard_blks -= dc->len;
1067 
1068 	kmem_cache_free(discard_cmd_slab, dc);
1069 
1070 	atomic_dec(&dcc->discard_cmd_cnt);
1071 }
1072 
1073 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1074 							struct discard_cmd *dc)
1075 {
1076 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1077 	unsigned long flags;
1078 
1079 	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
1080 
1081 	spin_lock_irqsave(&dc->lock, flags);
1082 	if (dc->bio_ref) {
1083 		spin_unlock_irqrestore(&dc->lock, flags);
1084 		return;
1085 	}
1086 	spin_unlock_irqrestore(&dc->lock, flags);
1087 
1088 	f2fs_bug_on(sbi, dc->ref);
1089 
1090 	if (dc->error == -EOPNOTSUPP)
1091 		dc->error = 0;
1092 
1093 	if (dc->error)
1094 		printk_ratelimited(
1095 			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
1096 			KERN_INFO, sbi->sb->s_id,
1097 			dc->lstart, dc->start, dc->len, dc->error);
1098 	__detach_discard_cmd(dcc, dc);
1099 }
1100 
1101 static void f2fs_submit_discard_endio(struct bio *bio)
1102 {
1103 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
1104 	unsigned long flags;
1105 
1106 	spin_lock_irqsave(&dc->lock, flags);
1107 	if (!dc->error)
1108 		dc->error = blk_status_to_errno(bio->bi_status);
1109 	dc->bio_ref--;
1110 	if (!dc->bio_ref && dc->state == D_SUBMIT) {
1111 		dc->state = D_DONE;
1112 		complete_all(&dc->wait);
1113 	}
1114 	spin_unlock_irqrestore(&dc->lock, flags);
1115 	bio_put(bio);
1116 }
1117 
1118 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1119 				block_t start, block_t end)
1120 {
1121 #ifdef CONFIG_F2FS_CHECK_FS
1122 	struct seg_entry *sentry;
1123 	unsigned int segno;
1124 	block_t blk = start;
1125 	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
1126 	unsigned long *map;
1127 
1128 	while (blk < end) {
1129 		segno = GET_SEGNO(sbi, blk);
1130 		sentry = get_seg_entry(sbi, segno);
1131 		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1132 
1133 		if (end < START_BLOCK(sbi, segno + 1))
1134 			size = GET_BLKOFF_FROM_SEG0(sbi, end);
1135 		else
1136 			size = max_blocks;
1137 		map = (unsigned long *)(sentry->cur_valid_map);
1138 		offset = __find_rev_next_bit(map, size, offset);
1139 		f2fs_bug_on(sbi, offset != size);
1140 		blk = START_BLOCK(sbi, segno + 1);
1141 	}
1142 #endif
1143 }
1144 
1145 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1146 				struct discard_policy *dpolicy,
1147 				int discard_type, unsigned int granularity)
1148 {
1149 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1150 
1151 	/* common policy */
1152 	dpolicy->type = discard_type;
1153 	dpolicy->sync = true;
1154 	dpolicy->ordered = false;
1155 	dpolicy->granularity = granularity;
1156 
1157 	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
1158 	dpolicy->io_aware_gran = MAX_PLIST_NUM;
1159 	dpolicy->timeout = false;
1160 
1161 	if (discard_type == DPOLICY_BG) {
1162 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
1163 		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
1164 		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
1165 		dpolicy->io_aware = true;
1166 		dpolicy->sync = false;
1167 		dpolicy->ordered = true;
1168 		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
1169 			dpolicy->granularity = 1;
1170 			if (atomic_read(&dcc->discard_cmd_cnt))
1171 				dpolicy->max_interval =
1172 					DEF_MIN_DISCARD_ISSUE_TIME;
1173 		}
1174 	} else if (discard_type == DPOLICY_FORCE) {
1175 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
1176 		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
1177 		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
1178 		dpolicy->io_aware = false;
1179 	} else if (discard_type == DPOLICY_FSTRIM) {
1180 		dpolicy->io_aware = false;
1181 	} else if (discard_type == DPOLICY_UMOUNT) {
1182 		dpolicy->io_aware = false;
1183 		/* we need to issue all of them to keep CP_TRIMMED_FLAG */
1184 		dpolicy->granularity = 1;
1185 		dpolicy->timeout = true;
1186 	}
1187 }
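
/*
 * Usage sketch (added commentary): callers stack-allocate a policy and
 * tune it per situation, e.g.
 *
 *	struct discard_policy dpolicy;
 *
 *	__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
 *	__issue_discard_cmd(sbi, &dpolicy);
 *
 * as issue_discard_thread() does below when gc_mode is GC_URGENT_HIGH.
 */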
1188 
1189 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1190 				struct block_device *bdev, block_t lstart,
1191 				block_t start, block_t len);
1192 /* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
1193 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1194 						struct discard_policy *dpolicy,
1195 						struct discard_cmd *dc,
1196 						unsigned int *issued)
1197 {
1198 	struct block_device *bdev = dc->bdev;
1199 	struct request_queue *q = bdev_get_queue(bdev);
1200 	unsigned int max_discard_blocks =
1201 			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
1202 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1203 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1204 					&(dcc->fstrim_list) : &(dcc->wait_list);
1205 	int flag = dpolicy->sync ? REQ_SYNC : 0;
1206 	block_t lstart, start, len, total_len;
1207 	int err = 0;
1208 
1209 	if (dc->state != D_PREP)
1210 		return 0;
1211 
1212 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1213 		return 0;
1214 
1215 	trace_f2fs_issue_discard(bdev, dc->start, dc->len);
1216 
1217 	lstart = dc->lstart;
1218 	start = dc->start;
1219 	len = dc->len;
1220 	total_len = len;
1221 
1222 	dc->len = 0;
1223 
1224 	while (total_len && *issued < dpolicy->max_requests && !err) {
1225 		struct bio *bio = NULL;
1226 		unsigned long flags;
1227 		bool last = true;
1228 
1229 		if (len > max_discard_blocks) {
1230 			len = max_discard_blocks;
1231 			last = false;
1232 		}
1233 
1234 		(*issued)++;
1235 		if (*issued == dpolicy->max_requests)
1236 			last = true;
1237 
1238 		dc->len += len;
1239 
1240 		if (time_to_inject(sbi, FAULT_DISCARD)) {
1241 			f2fs_show_injection_info(sbi, FAULT_DISCARD);
1242 			err = -EIO;
1243 			goto submit;
1244 		}
1245 		err = __blkdev_issue_discard(bdev,
1246 					SECTOR_FROM_BLOCK(start),
1247 					SECTOR_FROM_BLOCK(len),
1248 					GFP_NOFS, 0, &bio);
1249 submit:
1250 		if (err) {
1251 			spin_lock_irqsave(&dc->lock, flags);
1252 			if (dc->state == D_PARTIAL)
1253 				dc->state = D_SUBMIT;
1254 			spin_unlock_irqrestore(&dc->lock, flags);
1255 
1256 			break;
1257 		}
1258 
1259 		f2fs_bug_on(sbi, !bio);
1260 
1261 		/*
1262 		 * the state must be updated before submission, so the endio
1263 		 * handler cannot move it to D_DONE right away
1264 		 */
1265 		spin_lock_irqsave(&dc->lock, flags);
1266 		if (last)
1267 			dc->state = D_SUBMIT;
1268 		else
1269 			dc->state = D_PARTIAL;
1270 		dc->bio_ref++;
1271 		spin_unlock_irqrestore(&dc->lock, flags);
1272 
1273 		atomic_inc(&dcc->queued_discard);
1274 		dc->queued++;
1275 		list_move_tail(&dc->list, wait_list);
1276 
1277 		/* sanity check on discard range */
1278 		__check_sit_bitmap(sbi, lstart, lstart + len);
1279 
1280 		bio->bi_private = dc;
1281 		bio->bi_end_io = f2fs_submit_discard_endio;
1282 		bio->bi_opf |= flag;
1283 		submit_bio(bio);
1284 
1285 		atomic_inc(&dcc->issued_discard);
1286 
1287 		f2fs_update_iostat(sbi, FS_DISCARD, 1);
1288 
1289 		lstart += len;
1290 		start += len;
1291 		total_len -= len;
1292 		len = total_len;
1293 	}
1294 
1295 	if (!err && len) {
1296 		dcc->undiscard_blks -= len;
1297 		__update_discard_tree_range(sbi, bdev, lstart, start, len);
1298 	}
1299 	return err;
1300 }
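
/*
 * Example (added commentary): if dc covers 8192 blocks but the device
 * only accepts max_discard_sectors worth 4096 blocks, the loop above
 * issues two bios; the first leaves dc in D_PARTIAL, the last moves it
 * to D_SUBMIT, and any unissued tail is put back into the tree via
 * __update_discard_tree_range().
 */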
1301 
1302 static void __insert_discard_tree(struct f2fs_sb_info *sbi,
1303 				struct block_device *bdev, block_t lstart,
1304 				block_t start, block_t len,
1305 				struct rb_node **insert_p,
1306 				struct rb_node *insert_parent)
1307 {
1308 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1309 	struct rb_node **p;
1310 	struct rb_node *parent = NULL;
1311 	bool leftmost = true;
1312 
1313 	if (insert_p && insert_parent) {
1314 		parent = insert_parent;
1315 		p = insert_p;
1316 		goto do_insert;
1317 	}
1318 
1319 	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
1320 							lstart, &leftmost);
1321 do_insert:
1322 	__attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
1323 								p, leftmost);
1324 }
1325 
1326 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1327 						struct discard_cmd *dc)
1328 {
1329 	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
1330 }
1331 
1332 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1333 				struct discard_cmd *dc, block_t blkaddr)
1334 {
1335 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1336 	struct discard_info di = dc->di;
1337 	bool modified = false;
1338 
1339 	if (dc->state == D_DONE || dc->len == 1) {
1340 		__remove_discard_cmd(sbi, dc);
1341 		return;
1342 	}
1343 
1344 	dcc->undiscard_blks -= di.len;
1345 
1346 	if (blkaddr > di.lstart) {
1347 		dc->len = blkaddr - dc->lstart;
1348 		dcc->undiscard_blks += dc->len;
1349 		__relocate_discard_cmd(dcc, dc);
1350 		modified = true;
1351 	}
1352 
1353 	if (blkaddr < di.lstart + di.len - 1) {
1354 		if (modified) {
1355 			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
1356 					di.start + blkaddr + 1 - di.lstart,
1357 					di.lstart + di.len - 1 - blkaddr,
1358 					NULL, NULL);
1359 		} else {
1360 			dc->lstart++;
1361 			dc->len--;
1362 			dc->start++;
1363 			dcc->undiscard_blks += dc->len;
1364 			__relocate_discard_cmd(dcc, dc);
1365 		}
1366 	}
1367 }
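
/*
 * Worked example (added commentary): punching blkaddr 105 out of a
 * D_PREP command covering lstart 100..109 trims the command to
 * 100..104 and inserts a new one for 106..109; punching the first
 * block instead just advances lstart/start and shrinks len by one.
 */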
1368 
1369 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1370 				struct block_device *bdev, block_t lstart,
1371 				block_t start, block_t len)
1372 {
1373 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1374 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1375 	struct discard_cmd *dc;
1376 	struct discard_info di = {0};
1377 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1378 	struct request_queue *q = bdev_get_queue(bdev);
1379 	unsigned int max_discard_blocks =
1380 			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
1381 	block_t end = lstart + len;
1382 
1383 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1384 					NULL, lstart,
1385 					(struct rb_entry **)&prev_dc,
1386 					(struct rb_entry **)&next_dc,
1387 					&insert_p, &insert_parent, true, NULL);
1388 	if (dc)
1389 		prev_dc = dc;
1390 
1391 	if (!prev_dc) {
1392 		di.lstart = lstart;
1393 		di.len = next_dc ? next_dc->lstart - lstart : len;
1394 		di.len = min(di.len, len);
1395 		di.start = start;
1396 	}
1397 
1398 	while (1) {
1399 		struct rb_node *node;
1400 		bool merged = false;
1401 		struct discard_cmd *tdc = NULL;
1402 
1403 		if (prev_dc) {
1404 			di.lstart = prev_dc->lstart + prev_dc->len;
1405 			if (di.lstart < lstart)
1406 				di.lstart = lstart;
1407 			if (di.lstart >= end)
1408 				break;
1409 
1410 			if (!next_dc || next_dc->lstart > end)
1411 				di.len = end - di.lstart;
1412 			else
1413 				di.len = next_dc->lstart - di.lstart;
1414 			di.start = start + di.lstart - lstart;
1415 		}
1416 
1417 		if (!di.len)
1418 			goto next;
1419 
1420 		if (prev_dc && prev_dc->state == D_PREP &&
1421 			prev_dc->bdev == bdev &&
1422 			__is_discard_back_mergeable(&di, &prev_dc->di,
1423 							max_discard_blocks)) {
1424 			prev_dc->di.len += di.len;
1425 			dcc->undiscard_blks += di.len;
1426 			__relocate_discard_cmd(dcc, prev_dc);
1427 			di = prev_dc->di;
1428 			tdc = prev_dc;
1429 			merged = true;
1430 		}
1431 
1432 		if (next_dc && next_dc->state == D_PREP &&
1433 			next_dc->bdev == bdev &&
1434 			__is_discard_front_mergeable(&di, &next_dc->di,
1435 							max_discard_blocks)) {
1436 			next_dc->di.lstart = di.lstart;
1437 			next_dc->di.len += di.len;
1438 			next_dc->di.start = di.start;
1439 			dcc->undiscard_blks += di.len;
1440 			__relocate_discard_cmd(dcc, next_dc);
1441 			if (tdc)
1442 				__remove_discard_cmd(sbi, tdc);
1443 			merged = true;
1444 		}
1445 
1446 		if (!merged) {
1447 			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
1448 							di.len, NULL, NULL);
1449 		}
1450 next:
1451 		prev_dc = next_dc;
1452 		if (!prev_dc)
1453 			break;
1454 
1455 		node = rb_next(&prev_dc->rb_node);
1456 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1457 	}
1458 }
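
/*
 * Example (added commentary): queueing blocks 200..299 while a D_PREP
 * command already covers 100..199 on the same bdev back-merges into a
 * single 100..299 command, provided the combined length stays within
 * max_discard_blocks; a command starting at 300 would then be
 * front-merged likewise, with the temporary duplicate dropped via
 * __remove_discard_cmd().
 */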
1459 
1460 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
1461 		struct block_device *bdev, block_t blkstart, block_t blklen)
1462 {
1463 	block_t lblkstart = blkstart;
1464 
1465 	if (!f2fs_bdev_support_discard(bdev))
1466 		return 0;
1467 
1468 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
1469 
1470 	if (f2fs_is_multi_device(sbi)) {
1471 		int devi = f2fs_target_device_index(sbi, blkstart);
1472 
1473 		blkstart -= FDEV(devi).start_blk;
1474 	}
1475 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1476 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1477 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1478 	return 0;
1479 }
1480 
1481 static int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1482 					struct discard_policy *dpolicy)
1483 {
1484 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1485 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1486 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1487 	struct discard_cmd *dc;
1488 	struct blk_plug plug;
1489 	unsigned int pos = dcc->next_pos;
1490 	int issued = 0;
1491 	bool io_interrupted = false;
1492 
1493 	mutex_lock(&dcc->cmd_lock);
1494 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1495 					NULL, pos,
1496 					(struct rb_entry **)&prev_dc,
1497 					(struct rb_entry **)&next_dc,
1498 					&insert_p, &insert_parent, true, NULL);
1499 	if (!dc)
1500 		dc = next_dc;
1501 
1502 	blk_start_plug(&plug);
1503 
1504 	while (dc) {
1505 		struct rb_node *node;
1506 		int err = 0;
1507 
1508 		if (dc->state != D_PREP)
1509 			goto next;
1510 
1511 		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1512 			io_interrupted = true;
1513 			break;
1514 		}
1515 
1516 		dcc->next_pos = dc->lstart + dc->len;
1517 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1518 
1519 		if (issued >= dpolicy->max_requests)
1520 			break;
1521 next:
1522 		node = rb_next(&dc->rb_node);
1523 		if (err)
1524 			__remove_discard_cmd(sbi, dc);
1525 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1526 	}
1527 
1528 	blk_finish_plug(&plug);
1529 
1530 	if (!dc)
1531 		dcc->next_pos = 0;
1532 
1533 	mutex_unlock(&dcc->cmd_lock);
1534 
1535 	if (!issued && io_interrupted)
1536 		issued = -1;
1537 
1538 	return issued;
1539 }
1540 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1541 					struct discard_policy *dpolicy);
1542 
1543 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1544 					struct discard_policy *dpolicy)
1545 {
1546 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1547 	struct list_head *pend_list;
1548 	struct discard_cmd *dc, *tmp;
1549 	struct blk_plug plug;
1550 	int i, issued;
1551 	bool io_interrupted = false;
1552 
1553 	if (dpolicy->timeout)
1554 		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1555 
1556 retry:
1557 	issued = 0;
1558 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1559 		if (dpolicy->timeout &&
1560 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1561 			break;
1562 
1563 		if (i + 1 < dpolicy->granularity)
1564 			break;
1565 
1566 		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
1567 			return __issue_discard_cmd_orderly(sbi, dpolicy);
1568 
1569 		pend_list = &dcc->pend_list[i];
1570 
1571 		mutex_lock(&dcc->cmd_lock);
1572 		if (list_empty(pend_list))
1573 			goto next;
1574 		if (unlikely(dcc->rbtree_check))
1575 			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
1576 							&dcc->root, false));
1577 		blk_start_plug(&plug);
1578 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1579 			f2fs_bug_on(sbi, dc->state != D_PREP);
1580 
1581 			if (dpolicy->timeout &&
1582 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1583 				break;
1584 
1585 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1586 						!is_idle(sbi, DISCARD_TIME)) {
1587 				io_interrupted = true;
1588 				break;
1589 			}
1590 
1591 			__submit_discard_cmd(sbi, dpolicy, dc, &issued);
1592 
1593 			if (issued >= dpolicy->max_requests)
1594 				break;
1595 		}
1596 		blk_finish_plug(&plug);
1597 next:
1598 		mutex_unlock(&dcc->cmd_lock);
1599 
1600 		if (issued >= dpolicy->max_requests || io_interrupted)
1601 			break;
1602 	}
1603 
1604 	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1605 		__wait_all_discard_cmd(sbi, dpolicy);
1606 		goto retry;
1607 	}
1608 
1609 	if (!issued && io_interrupted)
1610 		issued = -1;
1611 
1612 	return issued;
1613 }
1614 
1615 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1616 {
1617 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1618 	struct list_head *pend_list;
1619 	struct discard_cmd *dc, *tmp;
1620 	int i;
1621 	bool dropped = false;
1622 
1623 	mutex_lock(&dcc->cmd_lock);
1624 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1625 		pend_list = &dcc->pend_list[i];
1626 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1627 			f2fs_bug_on(sbi, dc->state != D_PREP);
1628 			__remove_discard_cmd(sbi, dc);
1629 			dropped = true;
1630 		}
1631 	}
1632 	mutex_unlock(&dcc->cmd_lock);
1633 
1634 	return dropped;
1635 }
1636 
1637 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1638 {
1639 	__drop_discard_cmd(sbi);
1640 }
1641 
1642 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1643 							struct discard_cmd *dc)
1644 {
1645 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1646 	unsigned int len = 0;
1647 
1648 	wait_for_completion_io(&dc->wait);
1649 	mutex_lock(&dcc->cmd_lock);
1650 	f2fs_bug_on(sbi, dc->state != D_DONE);
1651 	dc->ref--;
1652 	if (!dc->ref) {
1653 		if (!dc->error)
1654 			len = dc->len;
1655 		__remove_discard_cmd(sbi, dc);
1656 	}
1657 	mutex_unlock(&dcc->cmd_lock);
1658 
1659 	return len;
1660 }
1661 
1662 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1663 						struct discard_policy *dpolicy,
1664 						block_t start, block_t end)
1665 {
1666 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1667 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1668 					&(dcc->fstrim_list) : &(dcc->wait_list);
1669 	struct discard_cmd *dc, *tmp;
1670 	bool need_wait;
1671 	unsigned int trimmed = 0;
1672 
1673 next:
1674 	need_wait = false;
1675 
1676 	mutex_lock(&dcc->cmd_lock);
1677 	list_for_each_entry_safe(dc, tmp, wait_list, list) {
1678 		if (dc->lstart + dc->len <= start || end <= dc->lstart)
1679 			continue;
1680 		if (dc->len < dpolicy->granularity)
1681 			continue;
1682 		if (dc->state == D_DONE && !dc->ref) {
1683 			wait_for_completion_io(&dc->wait);
1684 			if (!dc->error)
1685 				trimmed += dc->len;
1686 			__remove_discard_cmd(sbi, dc);
1687 		} else {
1688 			dc->ref++;
1689 			need_wait = true;
1690 			break;
1691 		}
1692 	}
1693 	mutex_unlock(&dcc->cmd_lock);
1694 
1695 	if (need_wait) {
1696 		trimmed += __wait_one_discard_bio(sbi, dc);
1697 		goto next;
1698 	}
1699 
1700 	return trimmed;
1701 }
1702 
1703 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1704 						struct discard_policy *dpolicy)
1705 {
1706 	struct discard_policy dp;
1707 	unsigned int discard_blks;
1708 
1709 	if (dpolicy)
1710 		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1711 
1712 	/* wait all */
1713 	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
1714 	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1715 	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
1716 	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1717 
1718 	return discard_blks;
1719 }
1720 
1721 /* This should be covered by the global lock, &sit_i->sentry_lock */
1722 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1723 {
1724 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1725 	struct discard_cmd *dc;
1726 	bool need_wait = false;
1727 
1728 	mutex_lock(&dcc->cmd_lock);
1729 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
1730 							NULL, blkaddr);
1731 	if (dc) {
1732 		if (dc->state == D_PREP) {
1733 			__punch_discard_cmd(sbi, dc, blkaddr);
1734 		} else {
1735 			dc->ref++;
1736 			need_wait = true;
1737 		}
1738 	}
1739 	mutex_unlock(&dcc->cmd_lock);
1740 
1741 	if (need_wait)
1742 		__wait_one_discard_bio(sbi, dc);
1743 }
1744 
1745 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1746 {
1747 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1748 
1749 	if (dcc && dcc->f2fs_issue_discard) {
1750 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1751 
1752 		dcc->f2fs_issue_discard = NULL;
1753 		kthread_stop(discard_thread);
1754 	}
1755 }
1756 
1757 /* This comes from f2fs_put_super */
1758 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1759 {
1760 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1761 	struct discard_policy dpolicy;
1762 	bool dropped;
1763 
1764 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1765 					dcc->discard_granularity);
1766 	__issue_discard_cmd(sbi, &dpolicy);
1767 	dropped = __drop_discard_cmd(sbi);
1768 
1769 	/* just to make sure there are no pending discard commands */
1770 	__wait_all_discard_cmd(sbi, NULL);
1771 
1772 	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1773 	return dropped;
1774 }
1775 
1776 static int issue_discard_thread(void *data)
1777 {
1778 	struct f2fs_sb_info *sbi = data;
1779 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1780 	wait_queue_head_t *q = &dcc->discard_wait_queue;
1781 	struct discard_policy dpolicy;
1782 	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
1783 	int issued;
1784 
1785 	set_freezable();
1786 
1787 	do {
1788 		if (sbi->gc_mode == GC_URGENT_HIGH ||
1789 			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
1790 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
1791 		else
1792 			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1793 						dcc->discard_granularity);
1794 
1795 		if (!atomic_read(&dcc->discard_cmd_cnt))
1796 			wait_ms = dpolicy.max_interval;
1797 
1798 		wait_event_interruptible_timeout(*q,
1799 				kthread_should_stop() || freezing(current) ||
1800 				dcc->discard_wake,
1801 				msecs_to_jiffies(wait_ms));
1802 
1803 		if (dcc->discard_wake)
1804 			dcc->discard_wake = 0;
1805 
1806 		/* clean up pending candidates before going to sleep */
1807 		if (atomic_read(&dcc->queued_discard))
1808 			__wait_all_discard_cmd(sbi, NULL);
1809 
1810 		if (try_to_freeze())
1811 			continue;
1812 		if (f2fs_readonly(sbi->sb))
1813 			continue;
1814 		if (kthread_should_stop())
1815 			return 0;
1816 		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1817 			wait_ms = dpolicy.max_interval;
1818 			continue;
1819 		}
1820 		if (!atomic_read(&dcc->discard_cmd_cnt))
1821 			continue;
1822 
1823 		sb_start_intwrite(sbi->sb);
1824 
1825 		issued = __issue_discard_cmd(sbi, &dpolicy);
1826 		if (issued > 0) {
1827 			__wait_all_discard_cmd(sbi, &dpolicy);
1828 			wait_ms = dpolicy.min_interval;
1829 		} else if (issued == -1) {
1830 			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1831 			if (!wait_ms)
1832 				wait_ms = dpolicy.mid_interval;
1833 		} else {
1834 			wait_ms = dpolicy.max_interval;
1835 		}
1836 
1837 		sb_end_intwrite(sbi->sb);
1838 
1839 	} while (!kthread_should_stop());
1840 	return 0;
1841 }
1842 
1843 #ifdef CONFIG_BLK_DEV_ZONED
1844 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1845 		struct block_device *bdev, block_t blkstart, block_t blklen)
1846 {
1847 	sector_t sector, nr_sects;
1848 	block_t lblkstart = blkstart;
1849 	int devi = 0;
1850 
1851 	if (f2fs_is_multi_device(sbi)) {
1852 		devi = f2fs_target_device_index(sbi, blkstart);
1853 		if (blkstart < FDEV(devi).start_blk ||
1854 		    blkstart > FDEV(devi).end_blk) {
1855 			f2fs_err(sbi, "Invalid block %x", blkstart);
1856 			return -EIO;
1857 		}
1858 		blkstart -= FDEV(devi).start_blk;
1859 	}
1860 
1861 	/* For sequential zones, reset the zone write pointer */
1862 	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1863 		sector = SECTOR_FROM_BLOCK(blkstart);
1864 		nr_sects = SECTOR_FROM_BLOCK(blklen);
1865 
1866 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
1867 				nr_sects != bdev_zone_sectors(bdev)) {
1868 			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1869 				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1870 				 blkstart, blklen);
1871 			return -EIO;
1872 		}
1873 		trace_f2fs_issue_reset_zone(bdev, blkstart);
1874 		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1875 					sector, nr_sects, GFP_NOFS);
1876 	}
1877 
1878 	/* For conventional zones, use regular discard if supported */
1879 	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1880 }
1881 #endif
1882 
1883 static int __issue_discard_async(struct f2fs_sb_info *sbi,
1884 		struct block_device *bdev, block_t blkstart, block_t blklen)
1885 {
1886 #ifdef CONFIG_BLK_DEV_ZONED
1887 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
1888 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1889 #endif
1890 	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
1891 }
1892 
1893 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
1894 				block_t blkstart, block_t blklen)
1895 {
1896 	sector_t start = blkstart, len = 0;
1897 	struct block_device *bdev;
1898 	struct seg_entry *se;
1899 	unsigned int offset;
1900 	block_t i;
1901 	int err = 0;
1902 
1903 	bdev = f2fs_target_device(sbi, blkstart, NULL);
1904 
1905 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
1906 		if (i != start) {
1907 			struct block_device *bdev2 =
1908 				f2fs_target_device(sbi, i, NULL);
1909 
1910 			if (bdev2 != bdev) {
1911 				err = __issue_discard_async(sbi, bdev,
1912 						start, len);
1913 				if (err)
1914 					return err;
1915 				bdev = bdev2;
1916 				start = i;
1917 				len = 0;
1918 			}
1919 		}
1920 
1921 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
1922 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
1923 
1924 		if (f2fs_block_unit_discard(sbi) &&
1925 				!f2fs_test_and_set_bit(offset, se->discard_map))
1926 			sbi->discard_blks--;
1927 	}
1928 
1929 	if (len)
1930 		err = __issue_discard_async(sbi, bdev, start, len);
1931 	return err;
1932 }
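
/*
 * Example (added commentary, layout assumed): on a two-device setup
 * where blocks 0..1023 map to FDEV(0) and blocks from 1024 upward to
 * FDEV(1), f2fs_issue_discard(sbi, 1000, 100) is split into (1000, 24)
 * on the first bdev and (1024, 76) on the second, since each discard
 * span must stay on one device.
 */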
1933 
1934 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1935 							bool check_only)
1936 {
1937 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1938 	int max_blocks = sbi->blocks_per_seg;
1939 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
1940 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1941 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1942 	unsigned long *discard_map = (unsigned long *)se->discard_map;
1943 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
1944 	unsigned int start = 0, end = -1;
1945 	bool force = (cpc->reason & CP_DISCARD);
1946 	struct discard_entry *de = NULL;
1947 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
1948 	int i;
1949 
1950 	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi) ||
1951 			!f2fs_block_unit_discard(sbi))
1952 		return false;
1953 
1954 	if (!force) {
1955 		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
1956 			SM_I(sbi)->dcc_info->nr_discards >=
1957 				SM_I(sbi)->dcc_info->max_discards)
1958 			return false;
1959 	}
1960 
1961 	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1962 	for (i = 0; i < entries; i++)
1963 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
1964 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
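	/*
	 * Editor's note (illustrative, not upstream): in force (FITRIM) mode
	 * the candidates are blocks that are neither valid in the last
	 * checkpoint nor already discarded, i.e. free space that may still
	 * hold stale data on the device; otherwise the candidates are blocks
	 * that were valid at checkpoint time but have been freed since, which
	 * is exactly what (cur_map ^ ckpt_map) & ckpt_map selects.
	 */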
1965 
1966 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
1967 				SM_I(sbi)->dcc_info->max_discards) {
1968 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1969 		if (start >= max_blocks)
1970 			break;
1971 
1972 		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
1973 		if (force && start && end != max_blocks
1974 					&& (end - start) < cpc->trim_minlen)
1975 			continue;
1976 
1977 		if (check_only)
1978 			return true;
1979 
1980 		if (!de) {
1981 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
1982 						GFP_F2FS_ZERO, true, NULL);
1983 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1984 			list_add_tail(&de->list, head);
1985 		}
1986 
1987 		for (i = start; i < end; i++)
1988 			__set_bit_le(i, (void *)de->discard_map);
1989 
1990 		SM_I(sbi)->dcc_info->nr_discards += end - start;
1991 	}
1992 	return false;
1993 }
1994 
1995 static void release_discard_addr(struct discard_entry *entry)
1996 {
1997 	list_del(&entry->list);
1998 	kmem_cache_free(discard_entry_slab, entry);
1999 }
2000 
2001 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2002 {
2003 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2004 	struct discard_entry *entry, *this;
2005 
2006 	/* drop caches */
2007 	list_for_each_entry_safe(entry, this, head, list)
2008 		release_discard_addr(entry);
2009 }
2010 
2011 /*
2012  * f2fs_clear_prefree_segments() should be called after the checkpoint is done.
2013  */
2014 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2015 {
2016 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2017 	unsigned int segno;
2018 
2019 	mutex_lock(&dirty_i->seglist_lock);
2020 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2021 		__set_test_and_free(sbi, segno, false);
2022 	mutex_unlock(&dirty_i->seglist_lock);
2023 }
2024 
2025 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2026 						struct cp_control *cpc)
2027 {
2028 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2029 	struct list_head *head = &dcc->entry_list;
2030 	struct discard_entry *entry, *this;
2031 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2032 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2033 	unsigned int start = 0, end = -1;
2034 	unsigned int secno, start_segno;
2035 	bool force = (cpc->reason & CP_DISCARD);
2036 	bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2037 						DISCARD_UNIT_SECTION;
2038 
2039 	if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2040 		section_alignment = true;
2041 
2042 	mutex_lock(&dirty_i->seglist_lock);
2043 
2044 	while (1) {
2045 		int i;
2046 
2047 		if (section_alignment && end != -1)
2048 			end--;
2049 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2050 		if (start >= MAIN_SEGS(sbi))
2051 			break;
2052 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2053 								start + 1);
2054 
2055 		if (section_alignment) {
2056 			start = rounddown(start, sbi->segs_per_sec);
2057 			end = roundup(end, sbi->segs_per_sec);
2058 		}
2059 
2060 		for (i = start; i < end; i++) {
2061 			if (test_and_clear_bit(i, prefree_map))
2062 				dirty_i->nr_dirty[PRE]--;
2063 		}
2064 
2065 		if (!f2fs_realtime_discard_enable(sbi))
2066 			continue;
2067 
2068 		if (force && start >= cpc->trim_start &&
2069 					(end - 1) <= cpc->trim_end)
2070 				continue;
2071 
2072 		if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
2073 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2074 				(end - start) << sbi->log_blocks_per_seg);
2075 			continue;
2076 		}
2077 next:
2078 		secno = GET_SEC_FROM_SEG(sbi, start);
2079 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
2080 		if (!IS_CURSEC(sbi, secno) &&
2081 			!get_valid_blocks(sbi, start, true))
2082 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2083 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
2084 
2085 		start = start_segno + sbi->segs_per_sec;
2086 		if (start < end)
2087 			goto next;
2088 		else
2089 			end = start - 1;
2090 	}
2091 	mutex_unlock(&dirty_i->seglist_lock);
2092 
2093 	if (!f2fs_block_unit_discard(sbi))
2094 		goto wakeup;
2095 
2096 	/* send small discards */
2097 	list_for_each_entry_safe(entry, this, head, list) {
2098 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2099 		bool is_valid = test_bit_le(0, entry->discard_map);
2100 
2101 find_next:
2102 		if (is_valid) {
2103 			next_pos = find_next_zero_bit_le(entry->discard_map,
2104 					sbi->blocks_per_seg, cur_pos);
2105 			len = next_pos - cur_pos;
2106 
2107 			if (f2fs_sb_has_blkzoned(sbi) ||
2108 			    (force && len < cpc->trim_minlen))
2109 				goto skip;
2110 
2111 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2112 									len);
2113 			total_len += len;
2114 		} else {
2115 			next_pos = find_next_bit_le(entry->discard_map,
2116 					sbi->blocks_per_seg, cur_pos);
2117 		}
2118 skip:
2119 		cur_pos = next_pos;
2120 		is_valid = !is_valid;
2121 
2122 		if (cur_pos < sbi->blocks_per_seg)
2123 			goto find_next;
2124 
2125 		release_discard_addr(entry);
2126 		dcc->nr_discards -= total_len;
2127 	}
2128 
2129 wakeup:
2130 	wake_up_discard_thread(sbi, false);
2131 }
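/*
 * Editor's illustration (not upstream code): for a per-segment discard
 * bitmap beginning 1110 0111..., the find_next loop above alternates
 * between runs of set and clear bits, issuing one discard for blocks 0-2
 * and another for blocks 5-7 of the entry (subject to trim_minlen when
 * force is set), then releases the entry and deducts the issued length
 * from dcc->nr_discards.
 */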
2132 
2133 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2134 {
2135 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2136 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2137 	int err = 0;
2138 
2139 	if (!f2fs_realtime_discard_enable(sbi))
2140 		return 0;
2141 
2142 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2143 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2144 	if (IS_ERR(dcc->f2fs_issue_discard))
2145 		err = PTR_ERR(dcc->f2fs_issue_discard);
2146 
2147 	return err;
2148 }
2149 
2150 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2151 {
2152 	struct discard_cmd_control *dcc;
2153 	int err = 0, i;
2154 
2155 	if (SM_I(sbi)->dcc_info) {
2156 		dcc = SM_I(sbi)->dcc_info;
2157 		goto init_thread;
2158 	}
2159 
2160 	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2161 	if (!dcc)
2162 		return -ENOMEM;
2163 
2164 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2165 	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2166 		dcc->discard_granularity = sbi->blocks_per_seg;
2167 	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2168 		dcc->discard_granularity = BLKS_PER_SEC(sbi);
2169 
2170 	INIT_LIST_HEAD(&dcc->entry_list);
2171 	for (i = 0; i < MAX_PLIST_NUM; i++)
2172 		INIT_LIST_HEAD(&dcc->pend_list[i]);
2173 	INIT_LIST_HEAD(&dcc->wait_list);
2174 	INIT_LIST_HEAD(&dcc->fstrim_list);
2175 	mutex_init(&dcc->cmd_lock);
2176 	atomic_set(&dcc->issued_discard, 0);
2177 	atomic_set(&dcc->queued_discard, 0);
2178 	atomic_set(&dcc->discard_cmd_cnt, 0);
2179 	dcc->nr_discards = 0;
2180 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2181 	dcc->undiscard_blks = 0;
2182 	dcc->next_pos = 0;
2183 	dcc->root = RB_ROOT_CACHED;
2184 	dcc->rbtree_check = false;
2185 
2186 	init_waitqueue_head(&dcc->discard_wait_queue);
2187 	SM_I(sbi)->dcc_info = dcc;
2188 init_thread:
2189 	err = f2fs_start_discard_thread(sbi);
2190 	if (err) {
2191 		kfree(dcc);
2192 		SM_I(sbi)->dcc_info = NULL;
2193 	}
2194 
2195 	return err;
2196 }
2197 
2198 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2199 {
2200 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2201 
2202 	if (!dcc)
2203 		return;
2204 
2205 	f2fs_stop_discard_thread(sbi);
2206 
2207 	/*
2208 	 * Recovery can cache discard commands, so the error path of
2209 	 * fill_super() needs a chance to handle them.
2210 	 */
2211 	if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
2212 		f2fs_issue_discard_timeout(sbi);
2213 
2214 	kfree(dcc);
2215 	SM_I(sbi)->dcc_info = NULL;
2216 }
2217 
2218 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2219 {
2220 	struct sit_info *sit_i = SIT_I(sbi);
2221 
2222 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2223 		sit_i->dirty_sentries++;
2224 		return false;
2225 	}
2226 
2227 	return true;
2228 }
2229 
2230 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2231 					unsigned int segno, int modified)
2232 {
2233 	struct seg_entry *se = get_seg_entry(sbi, segno);
2234 
2235 	se->type = type;
2236 	if (modified)
2237 		__mark_sit_entry_dirty(sbi, segno);
2238 }
2239 
2240 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2241 								block_t blkaddr)
2242 {
2243 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2244 
2245 	if (segno == NULL_SEGNO)
2246 		return 0;
2247 	return get_seg_entry(sbi, segno)->mtime;
2248 }
2249 
2250 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2251 						unsigned long long old_mtime)
2252 {
2253 	struct seg_entry *se;
2254 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2255 	unsigned long long ctime = get_mtime(sbi, false);
2256 	unsigned long long mtime = old_mtime ? old_mtime : ctime;
2257 
2258 	if (segno == NULL_SEGNO)
2259 		return;
2260 
2261 	se = get_seg_entry(sbi, segno);
2262 
2263 	if (!se->mtime)
2264 		se->mtime = mtime;
2265 	else
2266 		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2267 						se->valid_blocks + 1);
2268 
2269 	if (ctime > SIT_I(sbi)->max_mtime)
2270 		SIT_I(sbi)->max_mtime = ctime;
2271 }
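/*
 * Editor's worked example for the incremental average above (not upstream
 * code): if a segment has mtime 1000 over 3 valid blocks and a block
 * stamped with mtime 2000 is added, the segment mtime becomes
 * (1000 * 3 + 2000) / (3 + 1) = 1250, i.e. the mean modification time of
 * all blocks currently valid in the segment.
 */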
2272 
2273 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2274 {
2275 	struct seg_entry *se;
2276 	unsigned int segno, offset;
2277 	long int new_vblocks;
2278 	bool exist;
2279 #ifdef CONFIG_F2FS_CHECK_FS
2280 	bool mir_exist;
2281 #endif
2282 
2283 	segno = GET_SEGNO(sbi, blkaddr);
2284 
2285 	se = get_seg_entry(sbi, segno);
2286 	new_vblocks = se->valid_blocks + del;
2287 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2288 
2289 	f2fs_bug_on(sbi, (new_vblocks < 0 ||
2290 			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2291 
2292 	se->valid_blocks = new_vblocks;
2293 
2294 	/* Update valid block bitmap */
2295 	if (del > 0) {
2296 		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2297 #ifdef CONFIG_F2FS_CHECK_FS
2298 		mir_exist = f2fs_test_and_set_bit(offset,
2299 						se->cur_valid_map_mir);
2300 		if (unlikely(exist != mir_exist)) {
2301 			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2302 				 blkaddr, exist);
2303 			f2fs_bug_on(sbi, 1);
2304 		}
2305 #endif
2306 		if (unlikely(exist)) {
2307 			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2308 				 blkaddr);
2309 			f2fs_bug_on(sbi, 1);
2310 			se->valid_blocks--;
2311 			del = 0;
2312 		}
2313 
2314 		if (f2fs_block_unit_discard(sbi) &&
2315 				!f2fs_test_and_set_bit(offset, se->discard_map))
2316 			sbi->discard_blks--;
2317 
2318 		/*
2319 		 * SSR should never reuse a block which is checkpointed
2320 		 * or newly invalidated.
2321 		 */
2322 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2323 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2324 				se->ckpt_valid_blocks++;
2325 		}
2326 	} else {
2327 		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2328 #ifdef CONFIG_F2FS_CHECK_FS
2329 		mir_exist = f2fs_test_and_clear_bit(offset,
2330 						se->cur_valid_map_mir);
2331 		if (unlikely(exist != mir_exist)) {
2332 			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2333 				 blkaddr, exist);
2334 			f2fs_bug_on(sbi, 1);
2335 		}
2336 #endif
2337 		if (unlikely(!exist)) {
2338 			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2339 				 blkaddr);
2340 			f2fs_bug_on(sbi, 1);
2341 			se->valid_blocks++;
2342 			del = 0;
2343 		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2344 			/*
2345 			 * If checkpoints are off, we must not reuse data that
2346 			 * was used in the previous checkpoint. If it was used
2347 			 * before, we must track that to know how much space we
2348 			 * really have.
2349 			 */
2350 			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2351 				spin_lock(&sbi->stat_lock);
2352 				sbi->unusable_block_count++;
2353 				spin_unlock(&sbi->stat_lock);
2354 			}
2355 		}
2356 
2357 		if (f2fs_block_unit_discard(sbi) &&
2358 			f2fs_test_and_clear_bit(offset, se->discard_map))
2359 			sbi->discard_blks++;
2360 	}
2361 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2362 		se->ckpt_valid_blocks += del;
2363 
2364 	__mark_sit_entry_dirty(sbi, segno);
2365 
2366 	/* update total number of valid blocks to be written in ckpt area */
2367 	SIT_I(sbi)->written_valid_blocks += del;
2368 
2369 	if (__is_large_section(sbi))
2370 		get_sec_entry(sbi, segno)->valid_blocks += del;
2371 }
2372 
2373 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2374 {
2375 	unsigned int segno = GET_SEGNO(sbi, addr);
2376 	struct sit_info *sit_i = SIT_I(sbi);
2377 
2378 	f2fs_bug_on(sbi, addr == NULL_ADDR);
2379 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2380 		return;
2381 
2382 	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
2383 	f2fs_invalidate_compress_page(sbi, addr);
2384 
2385 	/* add it into the SIT main buffer */
2386 	down_write(&sit_i->sentry_lock);
2387 
2388 	update_segment_mtime(sbi, addr, 0);
2389 	update_sit_entry(sbi, addr, -1);
2390 
2391 	/* add it into dirty seglist */
2392 	locate_dirty_segment(sbi, segno);
2393 
2394 	up_write(&sit_i->sentry_lock);
2395 }
2396 
2397 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2398 {
2399 	struct sit_info *sit_i = SIT_I(sbi);
2400 	unsigned int segno, offset;
2401 	struct seg_entry *se;
2402 	bool is_cp = false;
2403 
2404 	if (!__is_valid_data_blkaddr(blkaddr))
2405 		return true;
2406 
2407 	down_read(&sit_i->sentry_lock);
2408 
2409 	segno = GET_SEGNO(sbi, blkaddr);
2410 	se = get_seg_entry(sbi, segno);
2411 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2412 
2413 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2414 		is_cp = true;
2415 
2416 	up_read(&sit_i->sentry_lock);
2417 
2418 	return is_cp;
2419 }
2420 
2421 /*
2422  * This function should be called with the curseg_mutex held.
2423  */
2424 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
2425 					struct f2fs_summary *sum)
2426 {
2427 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2428 	void *addr = curseg->sum_blk;
2429 
2430 	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
2431 	memcpy(addr, sum, sizeof(struct f2fs_summary));
2432 }
2433 
2434 /*
2435  * Calculate the number of current summary pages for writing
2436  */
2437 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2438 {
2439 	int valid_sum_count = 0;
2440 	int i, sum_in_page;
2441 
2442 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2443 		if (sbi->ckpt->alloc_type[i] == SSR)
2444 			valid_sum_count += sbi->blocks_per_seg;
2445 		else {
2446 			if (for_ra)
2447 				valid_sum_count += le16_to_cpu(
2448 					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2449 			else
2450 				valid_sum_count += curseg_blkoff(sbi, i);
2451 		}
2452 	}
2453 
2454 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
2455 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2456 	if (valid_sum_count <= sum_in_page)
2457 		return 1;
2458 	else if ((valid_sum_count - sum_in_page) <=
2459 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2460 		return 2;
2461 	return 3;
2462 }
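/*
 * Editor's worked example (illustrative; assumes the usual 4 KiB block,
 * SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5 and SUM_ENTRY_SIZE = 7 * 512, so
 * SUM_JOURNAL_SIZE = 4096 - 5 - 3584 = 507): the first compacted page
 * holds (4096 - 2 * 507 - 5) / 7 = 439 summaries and the next one
 * (4096 - 5) / 7 = 584 more, so the three data logs, with at most
 * 3 * 512 = 1536 valid summaries, always fit in at most three pages.
 */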
2463 
2464 /*
2465  * The caller should put this summary page.
2466  */
2467 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2468 {
2469 	if (unlikely(f2fs_cp_error(sbi)))
2470 		return ERR_PTR(-EIO);
2471 	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2472 }
2473 
2474 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2475 					void *src, block_t blk_addr)
2476 {
2477 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2478 
2479 	memcpy(page_address(page), src, PAGE_SIZE);
2480 	set_page_dirty(page);
2481 	f2fs_put_page(page, 1);
2482 }
2483 
2484 static void write_sum_page(struct f2fs_sb_info *sbi,
2485 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2486 {
2487 	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2488 }
2489 
2490 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2491 						int type, block_t blk_addr)
2492 {
2493 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2494 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2495 	struct f2fs_summary_block *src = curseg->sum_blk;
2496 	struct f2fs_summary_block *dst;
2497 
2498 	dst = (struct f2fs_summary_block *)page_address(page);
2499 	memset(dst, 0, PAGE_SIZE);
2500 
2501 	mutex_lock(&curseg->curseg_mutex);
2502 
2503 	down_read(&curseg->journal_rwsem);
2504 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2505 	up_read(&curseg->journal_rwsem);
2506 
2507 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2508 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2509 
2510 	mutex_unlock(&curseg->curseg_mutex);
2511 
2512 	set_page_dirty(page);
2513 	f2fs_put_page(page, 1);
2514 }
2515 
2516 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2517 				struct curseg_info *curseg, int type)
2518 {
2519 	unsigned int segno = curseg->segno + 1;
2520 	struct free_segmap_info *free_i = FREE_I(sbi);
2521 
2522 	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2523 		return !test_bit(segno, free_i->free_segmap);
2524 	return 0;
2525 }
2526 
2527 /*
2528  * Find a new segment in the free segment bitmap, in the right allocation
2529  * order. This function must succeed; otherwise it is a BUG.
2530  */
2531 static void get_new_segment(struct f2fs_sb_info *sbi,
2532 			unsigned int *newseg, bool new_sec, int dir)
2533 {
2534 	struct free_segmap_info *free_i = FREE_I(sbi);
2535 	unsigned int segno, secno, zoneno;
2536 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2537 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2538 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2539 	unsigned int left_start = hint;
2540 	bool init = true;
2541 	int go_left = 0;
2542 	int i;
2543 
2544 	spin_lock(&free_i->segmap_lock);
2545 
2546 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2547 		segno = find_next_zero_bit(free_i->free_segmap,
2548 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2549 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2550 			goto got_it;
2551 	}
2552 find_other_zone:
2553 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2554 	if (secno >= MAIN_SECS(sbi)) {
2555 		if (dir == ALLOC_RIGHT) {
2556 			secno = find_first_zero_bit(free_i->free_secmap,
2557 							MAIN_SECS(sbi));
2558 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2559 		} else {
2560 			go_left = 1;
2561 			left_start = hint - 1;
2562 		}
2563 	}
2564 	if (go_left == 0)
2565 		goto skip_left;
2566 
2567 	while (test_bit(left_start, free_i->free_secmap)) {
2568 		if (left_start > 0) {
2569 			left_start--;
2570 			continue;
2571 		}
2572 		left_start = find_first_zero_bit(free_i->free_secmap,
2573 							MAIN_SECS(sbi));
2574 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2575 		break;
2576 	}
2577 	secno = left_start;
2578 skip_left:
2579 	segno = GET_SEG_FROM_SEC(sbi, secno);
2580 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2581 
2582 	/* give up on finding another zone */
2583 	if (!init)
2584 		goto got_it;
2585 	if (sbi->secs_per_zone == 1)
2586 		goto got_it;
2587 	if (zoneno == old_zoneno)
2588 		goto got_it;
2589 	if (dir == ALLOC_LEFT) {
2590 		if (!go_left && zoneno + 1 >= total_zones)
2591 			goto got_it;
2592 		if (go_left && zoneno == 0)
2593 			goto got_it;
2594 	}
2595 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2596 		if (CURSEG_I(sbi, i)->zone == zoneno)
2597 			break;
2598 
2599 	if (i < NR_CURSEG_TYPE) {
2600 		/* zone is in use, try another */
2601 		if (go_left)
2602 			hint = zoneno * sbi->secs_per_zone - 1;
2603 		else if (zoneno + 1 >= total_zones)
2604 			hint = 0;
2605 		else
2606 			hint = (zoneno + 1) * sbi->secs_per_zone;
2607 		init = false;
2608 		goto find_other_zone;
2609 	}
2610 got_it:
2611 	/* mark the segment as in-use in the free segmap */
2612 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2613 	__set_inuse(sbi, segno);
2614 	*newseg = segno;
2615 	spin_unlock(&free_i->segmap_lock);
2616 }
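/*
 * Editor's note (illustrative): when secs_per_zone > 1, the routine loops
 * back to find_other_zone until it lands in a zone that no current log
 * occupies; e.g. a hint that resolves to the same zone as an active
 * curseg is pushed to the next zone to the right (or to the left when
 * go_left is set) before a segment is finally claimed with __set_inuse().
 */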
2617 
2618 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2619 {
2620 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2621 	struct summary_footer *sum_footer;
2622 	unsigned short seg_type = curseg->seg_type;
2623 
2624 	curseg->inited = true;
2625 	curseg->segno = curseg->next_segno;
2626 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2627 	curseg->next_blkoff = 0;
2628 	curseg->next_segno = NULL_SEGNO;
2629 
2630 	sum_footer = &(curseg->sum_blk->footer);
2631 	memset(sum_footer, 0, sizeof(struct summary_footer));
2632 
2633 	sanity_check_seg_type(sbi, seg_type);
2634 
2635 	if (IS_DATASEG(seg_type))
2636 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2637 	if (IS_NODESEG(seg_type))
2638 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2639 	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2640 }
2641 
2642 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2643 {
2644 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2645 	unsigned short seg_type = curseg->seg_type;
2646 
2647 	sanity_check_seg_type(sbi, seg_type);
2648 	if (f2fs_need_rand_seg(sbi))
2649 		return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
2650 
2651 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
2652 	if (__is_large_section(sbi))
2653 		return curseg->segno;
2654 
2655 	/* the inmem log may not be located on any segment after mount */
2656 	if (!curseg->inited)
2657 		return 0;
2658 
2659 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2660 		return 0;
2661 
2662 	if (test_opt(sbi, NOHEAP) &&
2663 		(seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
2664 		return 0;
2665 
2666 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2667 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2668 
2669 	/* find segments from 0 to reuse freed segments */
2670 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2671 		return 0;
2672 
2673 	return curseg->segno;
2674 }
2675 
2676 /*
2677  * Allocate a current working segment.
2678  * This function always allocates a free segment in LFS manner.
2679  */
2680 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2681 {
2682 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2683 	unsigned short seg_type = curseg->seg_type;
2684 	unsigned int segno = curseg->segno;
2685 	int dir = ALLOC_LEFT;
2686 
2687 	if (curseg->inited)
2688 		write_sum_page(sbi, curseg->sum_blk,
2689 				GET_SUM_BLOCK(sbi, segno));
2690 	if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
2691 		dir = ALLOC_RIGHT;
2692 
2693 	if (test_opt(sbi, NOHEAP))
2694 		dir = ALLOC_RIGHT;
2695 
2696 	segno = __get_next_segno(sbi, type);
2697 	get_new_segment(sbi, &segno, new_sec, dir);
2698 	curseg->next_segno = segno;
2699 	reset_curseg(sbi, type, 1);
2700 	curseg->alloc_type = LFS;
2701 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2702 		curseg->fragment_remained_chunk =
2703 				prandom_u32() % sbi->max_fragment_chunk + 1;
2704 }
2705 
2706 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2707 					int segno, block_t start)
2708 {
2709 	struct seg_entry *se = get_seg_entry(sbi, segno);
2710 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2711 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2712 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2713 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2714 	int i;
2715 
2716 	for (i = 0; i < entries; i++)
2717 		target_map[i] = ckpt_map[i] | cur_map[i];
2718 
2719 	return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2720 }
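/*
 * Editor's illustration (not upstream code): if the first four offsets of
 * ckpt_valid_map are 1010 and of cur_valid_map are 0110, target_map starts
 * 1110 and __next_free_blkoff(sbi, segno, 0) returns offset 3; SSR may
 * only reuse a block that is free in both the checkpointed and the current
 * view of the segment.
 */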
2721 
2722 /*
2723  * If a segment is written in the LFS manner, the next block offset is just
2724  * obtained by increasing the current block offset. However, in the SSR
2725  * manner, the next block offset is obtained by calling __next_free_blkoff().
2726  */
2727 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2728 				struct curseg_info *seg)
2729 {
2730 	if (seg->alloc_type == SSR) {
2731 		seg->next_blkoff =
2732 			__next_free_blkoff(sbi, seg->segno,
2733 						seg->next_blkoff + 1);
2734 	} else {
2735 		seg->next_blkoff++;
2736 		if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) {
2737 			/* To allocate block chunks of different sizes, use a random number */
2738 			if (--seg->fragment_remained_chunk <= 0) {
2739 				seg->fragment_remained_chunk =
2740 				   prandom_u32() % sbi->max_fragment_chunk + 1;
2741 				seg->next_blkoff +=
2742 				   prandom_u32() % sbi->max_fragment_hole + 1;
2743 			}
2744 		}
2745 	}
2746 }
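/*
 * Editor's note on FS_MODE_FRAGMENT_BLK above (illustrative): with
 * max_fragment_chunk = 4 and max_fragment_hole = 4, LFS allocation might
 * hand out offsets 10, 11 and 12 as one random-sized chunk and then skip
 * a random hole of 1..4 blocks before starting the next chunk, which
 * deliberately fragments the log for testing.
 */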
2747 
2748 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2749 {
2750 	return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
2751 }
2752 
2753 /*
2754  * This function always allocates a used segment (from the dirty seglist) in
2755  * the SSR manner, so it must recover the existing valid-block information.
2756  */
2757 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
2758 {
2759 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2760 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2761 	unsigned int new_segno = curseg->next_segno;
2762 	struct f2fs_summary_block *sum_node;
2763 	struct page *sum_page;
2764 
2765 	if (flush)
2766 		write_sum_page(sbi, curseg->sum_blk,
2767 					GET_SUM_BLOCK(sbi, curseg->segno));
2768 
2769 	__set_test_and_inuse(sbi, new_segno);
2770 
2771 	mutex_lock(&dirty_i->seglist_lock);
2772 	__remove_dirty_segment(sbi, new_segno, PRE);
2773 	__remove_dirty_segment(sbi, new_segno, DIRTY);
2774 	mutex_unlock(&dirty_i->seglist_lock);
2775 
2776 	reset_curseg(sbi, type, 1);
2777 	curseg->alloc_type = SSR;
2778 	curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2779 
2780 	sum_page = f2fs_get_sum_page(sbi, new_segno);
2781 	if (IS_ERR(sum_page)) {
2782 		/* GC won't be able to use stale summary pages due to cp_error */
2783 		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2784 		return;
2785 	}
2786 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2787 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2788 	f2fs_put_page(sum_page, 1);
2789 }
2790 
2791 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2792 				int alloc_mode, unsigned long long age);
2793 
2794 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2795 					int target_type, int alloc_mode,
2796 					unsigned long long age)
2797 {
2798 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2799 
2800 	curseg->seg_type = target_type;
2801 
2802 	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2803 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2804 
2805 		curseg->seg_type = se->type;
2806 		change_curseg(sbi, type, true);
2807 	} else {
2808 		/* allocate cold segment by default */
2809 		curseg->seg_type = CURSEG_COLD_DATA;
2810 		new_curseg(sbi, type, true);
2811 	}
2812 	stat_inc_seg_type(sbi, curseg);
2813 }
2814 
2815 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2816 {
2817 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2818 
2819 	if (!sbi->am.atgc_enabled)
2820 		return;
2821 
2822 	down_read(&SM_I(sbi)->curseg_lock);
2823 
2824 	mutex_lock(&curseg->curseg_mutex);
2825 	down_write(&SIT_I(sbi)->sentry_lock);
2826 
2827 	get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2828 
2829 	up_write(&SIT_I(sbi)->sentry_lock);
2830 	mutex_unlock(&curseg->curseg_mutex);
2831 
2832 	up_read(&SM_I(sbi)->curseg_lock);
2833 }
2834 
2835 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2836 {
2837 	__f2fs_init_atgc_curseg(sbi);
2838 }
2839 
2840 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2841 {
2842 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2843 
2844 	mutex_lock(&curseg->curseg_mutex);
2845 	if (!curseg->inited)
2846 		goto out;
2847 
2848 	if (get_valid_blocks(sbi, curseg->segno, false)) {
2849 		write_sum_page(sbi, curseg->sum_blk,
2850 				GET_SUM_BLOCK(sbi, curseg->segno));
2851 	} else {
2852 		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2853 		__set_test_and_free(sbi, curseg->segno, true);
2854 		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2855 	}
2856 out:
2857 	mutex_unlock(&curseg->curseg_mutex);
2858 }
2859 
2860 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2861 {
2862 	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2863 
2864 	if (sbi->am.atgc_enabled)
2865 		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2866 }
2867 
2868 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2869 {
2870 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2871 
2872 	mutex_lock(&curseg->curseg_mutex);
2873 	if (!curseg->inited)
2874 		goto out;
2875 	if (get_valid_blocks(sbi, curseg->segno, false))
2876 		goto out;
2877 
2878 	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2879 	__set_test_and_inuse(sbi, curseg->segno);
2880 	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2881 out:
2882 	mutex_unlock(&curseg->curseg_mutex);
2883 }
2884 
2885 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2886 {
2887 	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2888 
2889 	if (sbi->am.atgc_enabled)
2890 		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2891 }
2892 
2893 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2894 				int alloc_mode, unsigned long long age)
2895 {
2896 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2897 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2898 	unsigned segno = NULL_SEGNO;
2899 	unsigned short seg_type = curseg->seg_type;
2900 	int i, cnt;
2901 	bool reversed = false;
2902 
2903 	sanity_check_seg_type(sbi, seg_type);
2904 
2905 	/* f2fs_need_SSR() already forces us to do this */
2906 	if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
2907 		curseg->next_segno = segno;
2908 		return 1;
2909 	}
2910 
2911 	/* For node segments, let's do SSR more intensively */
2912 	if (IS_NODESEG(seg_type)) {
2913 		if (seg_type >= CURSEG_WARM_NODE) {
2914 			reversed = true;
2915 			i = CURSEG_COLD_NODE;
2916 		} else {
2917 			i = CURSEG_HOT_NODE;
2918 		}
2919 		cnt = NR_CURSEG_NODE_TYPE;
2920 	} else {
2921 		if (seg_type >= CURSEG_WARM_DATA) {
2922 			reversed = true;
2923 			i = CURSEG_COLD_DATA;
2924 		} else {
2925 			i = CURSEG_HOT_DATA;
2926 		}
2927 		cnt = NR_CURSEG_DATA_TYPE;
2928 	}
2929 
2930 	for (; cnt-- > 0; reversed ? i-- : i++) {
2931 		if (i == seg_type)
2932 			continue;
2933 		if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
2934 			curseg->next_segno = segno;
2935 			return 1;
2936 		}
2937 	}
2938 
2939 	/* find valid_blocks=0 in dirty list */
2940 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2941 		segno = get_free_segment(sbi);
2942 		if (segno != NULL_SEGNO) {
2943 			curseg->next_segno = segno;
2944 			return 1;
2945 		}
2946 	}
2947 	return 0;
2948 }
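/*
 * Editor's note (illustrative): the temperature scan above skips seg_type
 * itself; e.g. for CURSEG_COLD_DATA it walks COLD -> WARM -> HOT in the
 * "reversed" direction, while for CURSEG_HOT_DATA it walks HOT -> WARM ->
 * COLD. Only when checkpointing is disabled does it additionally fall
 * back to a dirty segment whose valid block count has dropped to zero.
 */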
2949 
2950 /*
2951  * Flush out the current segment and replace it with a new segment.
2952  * This function must succeed; otherwise it is a BUG.
2953  */
2954 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2955 						int type, bool force)
2956 {
2957 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2958 
2959 	if (force)
2960 		new_curseg(sbi, type, true);
2961 	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2962 					curseg->seg_type == CURSEG_WARM_NODE)
2963 		new_curseg(sbi, type, false);
2964 	else if (curseg->alloc_type == LFS &&
2965 			is_next_segment_free(sbi, curseg, type) &&
2966 			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2967 		new_curseg(sbi, type, false);
2968 	else if (f2fs_need_SSR(sbi) &&
2969 			get_ssr_segment(sbi, type, SSR, 0))
2970 		change_curseg(sbi, type, true);
2971 	else
2972 		new_curseg(sbi, type, false);
2973 
2974 	stat_inc_seg_type(sbi, curseg);
2975 }
2976 
2977 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
2978 					unsigned int start, unsigned int end)
2979 {
2980 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2981 	unsigned int segno;
2982 
2983 	down_read(&SM_I(sbi)->curseg_lock);
2984 	mutex_lock(&curseg->curseg_mutex);
2985 	down_write(&SIT_I(sbi)->sentry_lock);
2986 
2987 	segno = CURSEG_I(sbi, type)->segno;
2988 	if (segno < start || segno > end)
2989 		goto unlock;
2990 
2991 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
2992 		change_curseg(sbi, type, true);
2993 	else
2994 		new_curseg(sbi, type, true);
2995 
2996 	stat_inc_seg_type(sbi, curseg);
2997 
2998 	locate_dirty_segment(sbi, segno);
2999 unlock:
3000 	up_write(&SIT_I(sbi)->sentry_lock);
3001 
3002 	if (segno != curseg->segno)
3003 		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3004 			    type, segno, curseg->segno);
3005 
3006 	mutex_unlock(&curseg->curseg_mutex);
3007 	up_read(&SM_I(sbi)->curseg_lock);
3008 }
3009 
3010 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3011 						bool new_sec, bool force)
3012 {
3013 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3014 	unsigned int old_segno;
3015 
3016 	if (!curseg->inited)
3017 		goto alloc;
3018 
3019 	if (force || curseg->next_blkoff ||
3020 		get_valid_blocks(sbi, curseg->segno, new_sec))
3021 		goto alloc;
3022 
3023 	if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3024 		return;
3025 alloc:
3026 	old_segno = curseg->segno;
3027 	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
3028 	locate_dirty_segment(sbi, old_segno);
3029 }
3030 
3031 static void __allocate_new_section(struct f2fs_sb_info *sbi,
3032 						int type, bool force)
3033 {
3034 	__allocate_new_segment(sbi, type, true, force);
3035 }
3036 
3037 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3038 {
3039 	down_read(&SM_I(sbi)->curseg_lock);
3040 	down_write(&SIT_I(sbi)->sentry_lock);
3041 	__allocate_new_section(sbi, type, force);
3042 	up_write(&SIT_I(sbi)->sentry_lock);
3043 	up_read(&SM_I(sbi)->curseg_lock);
3044 }
3045 
3046 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3047 {
3048 	int i;
3049 
3050 	down_read(&SM_I(sbi)->curseg_lock);
3051 	down_write(&SIT_I(sbi)->sentry_lock);
3052 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
3053 		__allocate_new_segment(sbi, i, false, false);
3054 	up_write(&SIT_I(sbi)->sentry_lock);
3055 	up_read(&SM_I(sbi)->curseg_lock);
3056 }
3057 
3058 static const struct segment_allocation default_salloc_ops = {
3059 	.allocate_segment = allocate_segment_by_default,
3060 };
3061 
3062 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3063 						struct cp_control *cpc)
3064 {
3065 	__u64 trim_start = cpc->trim_start;
3066 	bool has_candidate = false;
3067 
3068 	down_write(&SIT_I(sbi)->sentry_lock);
3069 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
3070 		if (add_discard_addrs(sbi, cpc, true)) {
3071 			has_candidate = true;
3072 			break;
3073 		}
3074 	}
3075 	up_write(&SIT_I(sbi)->sentry_lock);
3076 
3077 	cpc->trim_start = trim_start;
3078 	return has_candidate;
3079 }
3080 
3081 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3082 					struct discard_policy *dpolicy,
3083 					unsigned int start, unsigned int end)
3084 {
3085 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3086 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
3087 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
3088 	struct discard_cmd *dc;
3089 	struct blk_plug plug;
3090 	int issued;
3091 	unsigned int trimmed = 0;
3092 
3093 next:
3094 	issued = 0;
3095 
3096 	mutex_lock(&dcc->cmd_lock);
3097 	if (unlikely(dcc->rbtree_check))
3098 		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
3099 							&dcc->root, false));
3100 
3101 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
3102 					NULL, start,
3103 					(struct rb_entry **)&prev_dc,
3104 					(struct rb_entry **)&next_dc,
3105 					&insert_p, &insert_parent, true, NULL);
3106 	if (!dc)
3107 		dc = next_dc;
3108 
3109 	blk_start_plug(&plug);
3110 
3111 	while (dc && dc->lstart <= end) {
3112 		struct rb_node *node;
3113 		int err = 0;
3114 
3115 		if (dc->len < dpolicy->granularity)
3116 			goto skip;
3117 
3118 		if (dc->state != D_PREP) {
3119 			list_move_tail(&dc->list, &dcc->fstrim_list);
3120 			goto skip;
3121 		}
3122 
3123 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3124 
3125 		if (issued >= dpolicy->max_requests) {
3126 			start = dc->lstart + dc->len;
3127 
3128 			if (err)
3129 				__remove_discard_cmd(sbi, dc);
3130 
3131 			blk_finish_plug(&plug);
3132 			mutex_unlock(&dcc->cmd_lock);
3133 			trimmed += __wait_all_discard_cmd(sbi, NULL);
3134 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
3135 			goto next;
3136 		}
3137 skip:
3138 		node = rb_next(&dc->rb_node);
3139 		if (err)
3140 			__remove_discard_cmd(sbi, dc);
3141 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3142 
3143 		if (fatal_signal_pending(current))
3144 			break;
3145 	}
3146 
3147 	blk_finish_plug(&plug);
3148 	mutex_unlock(&dcc->cmd_lock);
3149 
3150 	return trimmed;
3151 }
3152 
3153 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3154 {
3155 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
3156 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3157 	unsigned int start_segno, end_segno;
3158 	block_t start_block, end_block;
3159 	struct cp_control cpc;
3160 	struct discard_policy dpolicy;
3161 	unsigned long long trimmed = 0;
3162 	int err = 0;
3163 	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3164 
3165 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3166 		return -EINVAL;
3167 
3168 	if (end < MAIN_BLKADDR(sbi))
3169 		goto out;
3170 
3171 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3172 		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3173 		return -EFSCORRUPTED;
3174 	}
3175 
3176 	/* start/end segment number in main_area */
3177 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3178 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3179 						GET_SEGNO(sbi, end);
3180 	if (need_align) {
3181 		start_segno = rounddown(start_segno, sbi->segs_per_sec);
3182 		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
3183 	}
3184 
3185 	cpc.reason = CP_DISCARD;
3186 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3187 	cpc.trim_start = start_segno;
3188 	cpc.trim_end = end_segno;
3189 
3190 	if (sbi->discard_blks == 0)
3191 		goto out;
3192 
3193 	down_write(&sbi->gc_lock);
3194 	err = f2fs_write_checkpoint(sbi, &cpc);
3195 	up_write(&sbi->gc_lock);
3196 	if (err)
3197 		goto out;
3198 
3199 	/*
3200 	 * We filed discard candidates, but we don't actually need to wait for
3201 	 * all of them, since they will be issued at idle time by the runtime
3202 	 * discard thread. A user who enables runtime discard is relying on it
3203 	 * rather than on periodic fstrim.
3204 	 */
3205 	if (f2fs_realtime_discard_enable(sbi))
3206 		goto out;
3207 
3208 	start_block = START_BLOCK(sbi, start_segno);
3209 	end_block = START_BLOCK(sbi, end_segno + 1);
3210 
3211 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3212 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3213 					start_block, end_block);
3214 
3215 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3216 					start_block, end_block);
3217 out:
3218 	if (!err)
3219 		range->len = F2FS_BLK_TO_BYTES(trimmed);
3220 	return err;
3221 }
3222 
3223 static bool __has_curseg_space(struct f2fs_sb_info *sbi,
3224 					struct curseg_info *curseg)
3225 {
3226 	return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
3227 							curseg->segno);
3228 }
3229 
3230 int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3231 {
3232 	switch (hint) {
3233 	case WRITE_LIFE_SHORT:
3234 		return CURSEG_HOT_DATA;
3235 	case WRITE_LIFE_EXTREME:
3236 		return CURSEG_COLD_DATA;
3237 	default:
3238 		return CURSEG_WARM_DATA;
3239 	}
3240 }
3241 
3242 /* This returns write hints for each segment type. These hints will be
3243  * passed down to the block layer. There are mapping tables which depend on
3244  * the mount option 'whint_mode'.
3245  *
3246  * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
3247  *
3248  * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
3249  *
3250  * User                  F2FS                     Block
3251  * ----                  ----                     -----
3252  *                       META                     WRITE_LIFE_NOT_SET
3253  *                       HOT_NODE                 "
3254  *                       WARM_NODE                "
3255  *                       COLD_NODE                "
3256  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
3257  * extension list        "                        "
3258  *
3259  * -- buffered io
3260  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3261  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3262  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3263  * WRITE_LIFE_NONE       "                        "
3264  * WRITE_LIFE_MEDIUM     "                        "
3265  * WRITE_LIFE_LONG       "                        "
3266  *
3267  * -- direct io
3268  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3269  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3270  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3271  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
3272  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
3273  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
3274  *
3275  * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
3276  *
3277  * User                  F2FS                     Block
3278  * ----                  ----                     -----
3279  *                       META                     WRITE_LIFE_MEDIUM;
3280  *                       HOT_NODE                 WRITE_LIFE_NOT_SET
3281  *                       WARM_NODE                "
3282  *                       COLD_NODE                WRITE_LIFE_NONE
3283  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
3284  * extension list        "                        "
3285  *
3286  * -- buffered io
3287  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3288  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3289  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_LONG
3290  * WRITE_LIFE_NONE       "                        "
3291  * WRITE_LIFE_MEDIUM     "                        "
3292  * WRITE_LIFE_LONG       "                        "
3293  *
3294  * -- direct io
3295  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3296  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3297  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3298  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
3299  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
3300  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
3301  */
3302 
3303 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3304 				enum page_type type, enum temp_type temp)
3305 {
3306 	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
3307 		if (type == DATA) {
3308 			if (temp == WARM)
3309 				return WRITE_LIFE_NOT_SET;
3310 			else if (temp == HOT)
3311 				return WRITE_LIFE_SHORT;
3312 			else if (temp == COLD)
3313 				return WRITE_LIFE_EXTREME;
3314 		} else {
3315 			return WRITE_LIFE_NOT_SET;
3316 		}
3317 	} else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
3318 		if (type == DATA) {
3319 			if (temp == WARM)
3320 				return WRITE_LIFE_LONG;
3321 			else if (temp == HOT)
3322 				return WRITE_LIFE_SHORT;
3323 			else if (temp == COLD)
3324 				return WRITE_LIFE_EXTREME;
3325 		} else if (type == NODE) {
3326 			if (temp == WARM || temp == HOT)
3327 				return WRITE_LIFE_NOT_SET;
3328 			else if (temp == COLD)
3329 				return WRITE_LIFE_NONE;
3330 		} else if (type == META) {
3331 			return WRITE_LIFE_MEDIUM;
3332 		}
3333 	}
3334 	return WRITE_LIFE_NOT_SET;
3335 }
3336 
3337 static int __get_segment_type_2(struct f2fs_io_info *fio)
3338 {
3339 	if (fio->type == DATA)
3340 		return CURSEG_HOT_DATA;
3341 	else
3342 		return CURSEG_HOT_NODE;
3343 }
3344 
3345 static int __get_segment_type_4(struct f2fs_io_info *fio)
3346 {
3347 	if (fio->type == DATA) {
3348 		struct inode *inode = fio->page->mapping->host;
3349 
3350 		if (S_ISDIR(inode->i_mode))
3351 			return CURSEG_HOT_DATA;
3352 		else
3353 			return CURSEG_COLD_DATA;
3354 	} else {
3355 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3356 			return CURSEG_WARM_NODE;
3357 		else
3358 			return CURSEG_COLD_NODE;
3359 	}
3360 }
3361 
3362 static int __get_segment_type_6(struct f2fs_io_info *fio)
3363 {
3364 	if (fio->type == DATA) {
3365 		struct inode *inode = fio->page->mapping->host;
3366 
3367 		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3368 			return CURSEG_COLD_DATA_PINNED;
3369 
3370 		if (page_private_gcing(fio->page)) {
3371 			if (fio->sbi->am.atgc_enabled &&
3372 				(fio->io_type == FS_DATA_IO) &&
3373 				(fio->sbi->gc_mode != GC_URGENT_HIGH))
3374 				return CURSEG_ALL_DATA_ATGC;
3375 			else
3376 				return CURSEG_COLD_DATA;
3377 		}
3378 		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3379 			return CURSEG_COLD_DATA;
3380 		if (file_is_hot(inode) ||
3381 				is_inode_flag_set(inode, FI_HOT_DATA) ||
3382 				f2fs_is_atomic_file(inode) ||
3383 				f2fs_is_volatile_file(inode))
3384 			return CURSEG_HOT_DATA;
3385 		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3386 	} else {
3387 		if (IS_DNODE(fio->page))
3388 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3389 						CURSEG_HOT_NODE;
3390 		return CURSEG_COLD_NODE;
3391 	}
3392 }
3393 
3394 static int __get_segment_type(struct f2fs_io_info *fio)
3395 {
3396 	int type = 0;
3397 
3398 	switch (F2FS_OPTION(fio->sbi).active_logs) {
3399 	case 2:
3400 		type = __get_segment_type_2(fio);
3401 		break;
3402 	case 4:
3403 		type = __get_segment_type_4(fio);
3404 		break;
3405 	case 6:
3406 		type = __get_segment_type_6(fio);
3407 		break;
3408 	default:
3409 		f2fs_bug_on(fio->sbi, true);
3410 	}
3411 
3412 	if (IS_HOT(type))
3413 		fio->temp = HOT;
3414 	else if (IS_WARM(type))
3415 		fio->temp = WARM;
3416 	else
3417 		fio->temp = COLD;
3418 	return type;
3419 }
3420 
3421 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3422 		block_t old_blkaddr, block_t *new_blkaddr,
3423 		struct f2fs_summary *sum, int type,
3424 		struct f2fs_io_info *fio)
3425 {
3426 	struct sit_info *sit_i = SIT_I(sbi);
3427 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3428 	unsigned long long old_mtime;
3429 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3430 	struct seg_entry *se = NULL;
3431 
3432 	down_read(&SM_I(sbi)->curseg_lock);
3433 
3434 	mutex_lock(&curseg->curseg_mutex);
3435 	down_write(&sit_i->sentry_lock);
3436 
3437 	if (from_gc) {
3438 		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3439 		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3440 		sanity_check_seg_type(sbi, se->type);
3441 		f2fs_bug_on(sbi, IS_NODESEG(se->type));
3442 	}
3443 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3444 
3445 	f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
3446 
3447 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3448 
3449 	/*
3450 	 * __add_sum_entry should be called with the curseg_mutex held,
3451 	 * because this function updates a summary entry in the
3452 	 * current summary block.
3453 	 */
3454 	__add_sum_entry(sbi, type, sum);
3455 
3456 	__refresh_next_blkoff(sbi, curseg);
3457 
3458 	stat_inc_block_count(sbi, curseg);
3459 
3460 	if (from_gc) {
3461 		old_mtime = get_segment_mtime(sbi, old_blkaddr);
3462 	} else {
3463 		update_segment_mtime(sbi, old_blkaddr, 0);
3464 		old_mtime = 0;
3465 	}
3466 	update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3467 
3468 	/*
3469 	 * SIT information should be updated before segment allocation,
3470 	 * since SSR needs the latest valid block information.
3471 	 */
3472 	update_sit_entry(sbi, *new_blkaddr, 1);
3473 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3474 		update_sit_entry(sbi, old_blkaddr, -1);
3475 
3476 	if (!__has_curseg_space(sbi, curseg)) {
3477 		if (from_gc)
3478 			get_atssr_segment(sbi, type, se->type,
3479 						AT_SSR, se->mtime);
3480 		else
3481 			sit_i->s_ops->allocate_segment(sbi, type, false);
3482 	}
3483 	/*
3484 	 * The segment's dirty status should be updated after segment
3485 	 * allocation, so we only need to update the status once, after
3486 	 * the previous segment has been closed.
3487 	 */
3488 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3489 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3490 
3491 	up_write(&sit_i->sentry_lock);
3492 
3493 	if (page && IS_NODESEG(type)) {
3494 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3495 
3496 		f2fs_inode_chksum_set(sbi, page);
3497 	}
3498 
3499 	if (fio) {
3500 		struct f2fs_bio_info *io;
3501 
3502 		if (F2FS_IO_ALIGNED(sbi))
3503 			fio->retry = false;
3504 
3505 		INIT_LIST_HEAD(&fio->list);
3506 		fio->in_list = true;
3507 		io = sbi->write_io[fio->type] + fio->temp;
3508 		spin_lock(&io->io_lock);
3509 		list_add_tail(&fio->list, &io->io_list);
3510 		spin_unlock(&io->io_lock);
3511 	}
3512 
3513 	mutex_unlock(&curseg->curseg_mutex);
3514 
3515 	up_read(&SM_I(sbi)->curseg_lock);
3516 }
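/*
 * Editor's note (illustrative): the lock order taken above is
 * SM_I(sbi)->curseg_lock (read), then curseg->curseg_mutex, then
 * sit_i->sentry_lock; f2fs_do_replace_block() below takes the same three
 * locks in the same order (with curseg_lock held for write), which keeps
 * allocation and block replacement from deadlocking against each other.
 */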
3517 
3518 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3519 					block_t blkaddr, unsigned int blkcnt)
3520 {
3521 	if (!f2fs_is_multi_device(sbi))
3522 		return;
3523 
3524 	while (1) {
3525 		unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3526 		unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3527 
3528 		/* update device state for fsync */
3529 		f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3530 
3531 		/* update device state for checkpoint */
3532 		if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3533 			spin_lock(&sbi->dev_lock);
3534 			f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3535 			spin_unlock(&sbi->dev_lock);
3536 		}
3537 
3538 		if (blkcnt <= blks)
3539 			break;
3540 		blkcnt -= blks;
3541 		blkaddr += blks;
3542 	}
3543 }
3544 
3545 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3546 {
3547 	int type = __get_segment_type(fio);
3548 	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3549 
3550 	if (keep_order)
3551 		down_read(&fio->sbi->io_order_lock);
3552 reallocate:
3553 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3554 			&fio->new_blkaddr, sum, type, fio);
3555 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
3556 		invalidate_mapping_pages(META_MAPPING(fio->sbi),
3557 					fio->old_blkaddr, fio->old_blkaddr);
3558 		f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
3559 	}
3560 
3561 	/* write out the dirty page to the bdev */
3562 	f2fs_submit_page_write(fio);
3563 	if (fio->retry) {
3564 		fio->old_blkaddr = fio->new_blkaddr;
3565 		goto reallocate;
3566 	}
3567 
3568 	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3569 
3570 	if (keep_order)
3571 		up_read(&fio->sbi->io_order_lock);
3572 }
3573 
3574 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3575 					enum iostat_type io_type)
3576 {
3577 	struct f2fs_io_info fio = {
3578 		.sbi = sbi,
3579 		.type = META,
3580 		.temp = HOT,
3581 		.op = REQ_OP_WRITE,
3582 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3583 		.old_blkaddr = page->index,
3584 		.new_blkaddr = page->index,
3585 		.page = page,
3586 		.encrypted_page = NULL,
3587 		.in_list = false,
3588 	};
3589 
3590 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3591 		fio.op_flags &= ~REQ_META;
3592 
3593 	set_page_writeback(page);
3594 	ClearPageError(page);
3595 	f2fs_submit_page_write(&fio);
3596 
3597 	stat_inc_meta_count(sbi, page->index);
3598 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
3599 }
3600 
3601 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3602 {
3603 	struct f2fs_summary sum;
3604 
3605 	set_summary(&sum, nid, 0, 0);
3606 	do_write_page(&sum, fio);
3607 
3608 	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3609 }
3610 
3611 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3612 					struct f2fs_io_info *fio)
3613 {
3614 	struct f2fs_sb_info *sbi = fio->sbi;
3615 	struct f2fs_summary sum;
3616 
3617 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3618 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3619 	do_write_page(&sum, fio);
3620 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3621 
3622 	f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
3623 }
3624 
3625 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3626 {
3627 	int err;
3628 	struct f2fs_sb_info *sbi = fio->sbi;
3629 	unsigned int segno;
3630 
3631 	fio->new_blkaddr = fio->old_blkaddr;
3632 	/* i/o temperature is needed for passing down write hints */
3633 	__get_segment_type(fio);
3634 
3635 	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3636 
3637 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3638 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3639 		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3640 			  __func__, segno);
3641 		err = -EFSCORRUPTED;
3642 		goto drop_bio;
3643 	}
3644 
3645 	if (f2fs_cp_error(sbi)) {
3646 		err = -EIO;
3647 		goto drop_bio;
3648 	}
3649 
3650 	invalidate_mapping_pages(META_MAPPING(sbi),
3651 				fio->new_blkaddr, fio->new_blkaddr);
3652 
3653 	stat_inc_inplace_blocks(fio->sbi);
3654 
3655 	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
3656 		err = f2fs_merge_page_bio(fio);
3657 	else
3658 		err = f2fs_submit_page_bio(fio);
3659 	if (!err) {
3660 		f2fs_update_device_state(fio->sbi, fio->ino,
3661 						fio->new_blkaddr, 1);
3662 		f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3663 	}
3664 
3665 	return err;
3666 drop_bio:
3667 	if (fio->bio && *(fio->bio)) {
3668 		struct bio *bio = *(fio->bio);
3669 
3670 		bio->bi_status = BLK_STS_IOERR;
3671 		bio_endio(bio);
3672 		*(fio->bio) = NULL;
3673 	}
3674 	return err;
3675 }
3676 
3677 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3678 						unsigned int segno)
3679 {
3680 	int i;
3681 
3682 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3683 		if (CURSEG_I(sbi, i)->segno == segno)
3684 			break;
3685 	}
3686 	return i;
3687 }
3688 
3689 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3690 				block_t old_blkaddr, block_t new_blkaddr,
3691 				bool recover_curseg, bool recover_newaddr,
3692 				bool from_gc)
3693 {
3694 	struct sit_info *sit_i = SIT_I(sbi);
3695 	struct curseg_info *curseg;
3696 	unsigned int segno, old_cursegno;
3697 	struct seg_entry *se;
3698 	int type;
3699 	unsigned short old_blkoff;
3700 	unsigned char old_alloc_type;
3701 
3702 	segno = GET_SEGNO(sbi, new_blkaddr);
3703 	se = get_seg_entry(sbi, segno);
3704 	type = se->type;
3705 
3706 	down_write(&SM_I(sbi)->curseg_lock);
3707 
3708 	if (!recover_curseg) {
3709 		/* for recovery flow */
3710 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3711 			if (old_blkaddr == NULL_ADDR)
3712 				type = CURSEG_COLD_DATA;
3713 			else
3714 				type = CURSEG_WARM_DATA;
3715 		}
3716 	} else {
3717 		if (IS_CURSEG(sbi, segno)) {
3718 			/* se->type can change due to SSR allocation */
3719 			type = __f2fs_get_curseg(sbi, segno);
3720 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3721 		} else {
3722 			type = CURSEG_WARM_DATA;
3723 		}
3724 	}
3725 
3726 	f2fs_bug_on(sbi, !IS_DATASEG(type));
3727 	curseg = CURSEG_I(sbi, type);
3728 
3729 	mutex_lock(&curseg->curseg_mutex);
3730 	down_write(&sit_i->sentry_lock);
3731 
3732 	old_cursegno = curseg->segno;
3733 	old_blkoff = curseg->next_blkoff;
3734 	old_alloc_type = curseg->alloc_type;
3735 
3736 	/* change the current segment */
3737 	if (segno != curseg->segno) {
3738 		curseg->next_segno = segno;
3739 		change_curseg(sbi, type, true);
3740 	}
3741 
3742 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3743 	__add_sum_entry(sbi, type, sum);
3744 
3745 	if (!recover_curseg || recover_newaddr) {
3746 		if (!from_gc)
3747 			update_segment_mtime(sbi, new_blkaddr, 0);
3748 		update_sit_entry(sbi, new_blkaddr, 1);
3749 	}
3750 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3751 		invalidate_mapping_pages(META_MAPPING(sbi),
3752 					old_blkaddr, old_blkaddr);
3753 		f2fs_invalidate_compress_page(sbi, old_blkaddr);
3754 		if (!from_gc)
3755 			update_segment_mtime(sbi, old_blkaddr, 0);
3756 		update_sit_entry(sbi, old_blkaddr, -1);
3757 	}
3758 
3759 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3760 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3761 
3762 	locate_dirty_segment(sbi, old_cursegno);
3763 
3764 	if (recover_curseg) {
3765 		if (old_cursegno != curseg->segno) {
3766 			curseg->next_segno = old_cursegno;
3767 			change_curseg(sbi, type, true);
3768 		}
3769 		curseg->next_blkoff = old_blkoff;
3770 		curseg->alloc_type = old_alloc_type;
3771 	}
3772 
3773 	up_write(&sit_i->sentry_lock);
3774 	mutex_unlock(&curseg->curseg_mutex);
3775 	up_write(&SM_I(sbi)->curseg_lock);
3776 }
3777 
3778 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3779 				block_t old_addr, block_t new_addr,
3780 				unsigned char version, bool recover_curseg,
3781 				bool recover_newaddr)
3782 {
3783 	struct f2fs_summary sum;
3784 
3785 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3786 
3787 	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3788 					recover_curseg, recover_newaddr, false);
3789 
3790 	f2fs_update_data_blkaddr(dn, new_addr);
3791 }
3792 
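/*
 * The page may still sit in a cached, not yet submitted bio, so kick
 * both the merged LFS write path and the merged IPU write path before
 * waiting for writeback to finish (or for the page to become stable).
 */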
3793 void f2fs_wait_on_page_writeback(struct page *page,
3794 				enum page_type type, bool ordered, bool locked)
3795 {
3796 	if (PageWriteback(page)) {
3797 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3798 
3799 		/* submit cached LFS IO */
3800 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3801 		/* submit cached IPU IO */
3802 		f2fs_submit_merged_ipu_write(sbi, NULL, page);
3803 		if (ordered) {
3804 			wait_on_page_writeback(page);
3805 			f2fs_bug_on(sbi, locked && PageWriteback(page));
3806 		} else {
3807 			wait_for_stable_page(page);
3808 		}
3809 	}
3810 }
3811 
3812 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3813 {
3814 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3815 	struct page *cpage;
3816 
3817 	if (!f2fs_post_read_required(inode))
3818 		return;
3819 
3820 	if (!__is_valid_data_blkaddr(blkaddr))
3821 		return;
3822 
3823 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3824 	if (cpage) {
3825 		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3826 		f2fs_put_page(cpage, 1);
3827 	}
3828 }
3829 
3830 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3831 								block_t len)
3832 {
3833 	block_t i;
3834 
3835 	for (i = 0; i < len; i++)
3836 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
3837 }
3838 
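/*
 * A compacted summary area packs, in order: the NAT journal (kept in
 * the hot data curseg), the SIT journal (kept in the cold data curseg),
 * and then the summary entries of the three data cursegs. Entries fill
 * each meta page only up to PAGE_SIZE - SUM_FOOTER_SIZE before spilling
 * into the next block.
 */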
3839 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3840 {
3841 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3842 	struct curseg_info *seg_i;
3843 	unsigned char *kaddr;
3844 	struct page *page;
3845 	block_t start;
3846 	int i, j, offset;
3847 
3848 	start = start_sum_block(sbi);
3849 
3850 	page = f2fs_get_meta_page(sbi, start++);
3851 	if (IS_ERR(page))
3852 		return PTR_ERR(page);
3853 	kaddr = (unsigned char *)page_address(page);
3854 
3855 	/* Step 1: restore nat cache */
3856 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3857 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3858 
3859 	/* Step 2: restore sit cache */
3860 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3861 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3862 	offset = 2 * SUM_JOURNAL_SIZE;
3863 
3864 	/* Step 3: restore summary entries */
3865 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3866 		unsigned short blk_off;
3867 		unsigned int segno;
3868 
3869 		seg_i = CURSEG_I(sbi, i);
3870 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3871 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3872 		seg_i->next_segno = segno;
3873 		reset_curseg(sbi, i, 0);
3874 		seg_i->alloc_type = ckpt->alloc_type[i];
3875 		seg_i->next_blkoff = blk_off;
3876 
3877 		if (seg_i->alloc_type == SSR)
3878 			blk_off = sbi->blocks_per_seg;
3879 
3880 		for (j = 0; j < blk_off; j++) {
3881 			struct f2fs_summary *s;
3882 
3883 			s = (struct f2fs_summary *)(kaddr + offset);
3884 			seg_i->sum_blk->entries[j] = *s;
3885 			offset += SUMMARY_SIZE;
3886 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3887 						SUM_FOOTER_SIZE)
3888 				continue;
3889 
3890 			f2fs_put_page(page, 1);
3891 			page = NULL;
3892 
3893 			page = f2fs_get_meta_page(sbi, start++);
3894 			if (IS_ERR(page))
3895 				return PTR_ERR(page);
3896 			kaddr = (unsigned char *)page_address(page);
3897 			offset = 0;
3898 		}
3899 	}
3900 	f2fs_put_page(page, 1);
3901 	return 0;
3902 }
3903 
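/*
 * Restore one curseg from its full summary block. Data summaries live
 * in the checkpoint pack, as do node summaries when they were persisted
 * at checkpoint time (their version/ofs_in_node fields are meaningless
 * and get cleared); otherwise a node curseg falls back to its SSA block
 * and rebuilds the entries via f2fs_restore_node_summary().
 */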
3904 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3905 {
3906 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3907 	struct f2fs_summary_block *sum;
3908 	struct curseg_info *curseg;
3909 	struct page *new;
3910 	unsigned short blk_off;
3911 	unsigned int segno = 0;
3912 	block_t blk_addr = 0;
3913 	int err = 0;
3914 
3915 	/* get segment number and block addr */
3916 	if (IS_DATASEG(type)) {
3917 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3918 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3919 							CURSEG_HOT_DATA]);
3920 		if (__exist_node_summaries(sbi))
3921 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3922 		else
3923 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3924 	} else {
3925 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
3926 							CURSEG_HOT_NODE]);
3927 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3928 							CURSEG_HOT_NODE]);
3929 		if (__exist_node_summaries(sbi))
3930 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3931 							type - CURSEG_HOT_NODE);
3932 		else
3933 			blk_addr = GET_SUM_BLOCK(sbi, segno);
3934 	}
3935 
3936 	new = f2fs_get_meta_page(sbi, blk_addr);
3937 	if (IS_ERR(new))
3938 		return PTR_ERR(new);
3939 	sum = (struct f2fs_summary_block *)page_address(new);
3940 
3941 	if (IS_NODESEG(type)) {
3942 		if (__exist_node_summaries(sbi)) {
3943 			struct f2fs_summary *ns = &sum->entries[0];
3944 			int i;
3945 
3946 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3947 				ns->version = 0;
3948 				ns->ofs_in_node = 0;
3949 			}
3950 		} else {
3951 			err = f2fs_restore_node_summary(sbi, segno, sum);
3952 			if (err)
3953 				goto out;
3954 		}
3955 	}
3956 
3957 	/* set the uncompleted segment as the curseg */
3958 	curseg = CURSEG_I(sbi, type);
3959 	mutex_lock(&curseg->curseg_mutex);
3960 
3961 	/* update journal info */
3962 	down_write(&curseg->journal_rwsem);
3963 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3964 	up_write(&curseg->journal_rwsem);
3965 
3966 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3967 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3968 	curseg->next_segno = segno;
3969 	reset_curseg(sbi, type, 0);
3970 	curseg->alloc_type = ckpt->alloc_type[type];
3971 	curseg->next_blkoff = blk_off;
3972 	mutex_unlock(&curseg->curseg_mutex);
3973 out:
3974 	f2fs_put_page(new, 1);
3975 	return err;
3976 }
3977 
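/*
 * Restore all cursegs at mount time: data summaries come from either
 * the compacted or the normal format depending on CP_COMPACT_SUM_FLAG,
 * node summaries always from normal summary blocks. Finally, verify
 * that the restored journals do not exceed their entry limits.
 */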
3978 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3979 {
3980 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3981 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3982 	int type = CURSEG_HOT_DATA;
3983 	int err;
3984 
3985 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3986 		int npages = f2fs_npages_for_summary_flush(sbi, true);
3987 
3988 		if (npages >= 2)
3989 			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
3990 							META_CP, true);
3991 
3992 		/* restore for compacted data summary */
3993 		err = read_compacted_summaries(sbi);
3994 		if (err)
3995 			return err;
3996 		type = CURSEG_HOT_NODE;
3997 	}
3998 
3999 	if (__exist_node_summaries(sbi))
4000 		f2fs_ra_meta_pages(sbi,
4001 				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4002 				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
4003 
4004 	for (; type <= CURSEG_COLD_NODE; type++) {
4005 		err = read_normal_summaries(sbi, type);
4006 		if (err)
4007 			return err;
4008 	}
4009 
4010 	/* sanity check for summary blocks */
4011 	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
4012 			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
4013 		f2fs_err(sbi, "invalid journal entries nats %u sits %u",
4014 			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
4015 		return -EINVAL;
4016 	}
4017 
4018 	return 0;
4019 }
4020 
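/*
 * Mirror of read_compacted_summaries(): write the NAT and SIT journals
 * followed by the packed data summary entries, honoring the same
 * PAGE_SIZE - SUM_FOOTER_SIZE boundary per meta page. For an SSR curseg
 * all blocks_per_seg entries are written, since valid entries may exist
 * beyond next_blkoff.
 */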
4021 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4022 {
4023 	struct page *page;
4024 	unsigned char *kaddr;
4025 	struct f2fs_summary *summary;
4026 	struct curseg_info *seg_i;
4027 	int written_size = 0;
4028 	int i, j;
4029 
4030 	page = f2fs_grab_meta_page(sbi, blkaddr++);
4031 	kaddr = (unsigned char *)page_address(page);
4032 	memset(kaddr, 0, PAGE_SIZE);
4033 
4034 	/* Step 1: write nat cache */
4035 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4036 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
4037 	written_size += SUM_JOURNAL_SIZE;
4038 
4039 	/* Step 2: write sit cache */
4040 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4041 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
4042 	written_size += SUM_JOURNAL_SIZE;
4043 
4044 	/* Step 3: write summary entries */
4045 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4046 		unsigned short blkoff;
4047 
4048 		seg_i = CURSEG_I(sbi, i);
4049 		if (sbi->ckpt->alloc_type[i] == SSR)
4050 			blkoff = sbi->blocks_per_seg;
4051 		else
4052 			blkoff = curseg_blkoff(sbi, i);
4053 
4054 		for (j = 0; j < blkoff; j++) {
4055 			if (!page) {
4056 				page = f2fs_grab_meta_page(sbi, blkaddr++);
4057 				kaddr = (unsigned char *)page_address(page);
4058 				memset(kaddr, 0, PAGE_SIZE);
4059 				written_size = 0;
4060 			}
4061 			summary = (struct f2fs_summary *)(kaddr + written_size);
4062 			*summary = seg_i->sum_blk->entries[j];
4063 			written_size += SUMMARY_SIZE;
4064 
4065 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
4066 							SUM_FOOTER_SIZE)
4067 				continue;
4068 
4069 			set_page_dirty(page);
4070 			f2fs_put_page(page, 1);
4071 			page = NULL;
4072 		}
4073 	}
4074 	if (page) {
4075 		set_page_dirty(page);
4076 		f2fs_put_page(page, 1);
4077 	}
4078 }
4079 
4080 static void write_normal_summaries(struct f2fs_sb_info *sbi,
4081 					block_t blkaddr, int type)
4082 {
4083 	int i, end;
4084 
4085 	if (IS_DATASEG(type))
4086 		end = type + NR_CURSEG_DATA_TYPE;
4087 	else
4088 		end = type + NR_CURSEG_NODE_TYPE;
4089 
4090 	for (i = type; i < end; i++)
4091 		write_current_sum_page(sbi, i, blkaddr + (i - type));
4092 }
4093 
4094 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4095 {
4096 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4097 		write_compacted_summaries(sbi, start_blk);
4098 	else
4099 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4100 }
4101 
4102 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4103 {
4104 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4105 }
4106 
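/*
 * Look up @val (a nid for NAT_JOURNAL, a segno for SIT_JOURNAL) in the
 * in-memory journal and return its slot index. With @alloc set, a fresh
 * slot is reserved when no match exists and space remains; otherwise -1
 * is returned. Flush paths such as f2fs_flush_sit_entries() rely on the
 * @alloc behavior to append new journal entries.
 */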
4107 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
4108 					unsigned int val, int alloc)
4109 {
4110 	int i;
4111 
4112 	if (type == NAT_JOURNAL) {
4113 		for (i = 0; i < nats_in_cursum(journal); i++) {
4114 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4115 				return i;
4116 		}
4117 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
4118 			return update_nats_in_cursum(journal, 1);
4119 	} else if (type == SIT_JOURNAL) {
4120 		for (i = 0; i < sits_in_cursum(journal); i++)
4121 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4122 				return i;
4123 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
4124 			return update_sits_in_cursum(journal, 1);
4125 	}
4126 	return -1;
4127 }
4128 
4129 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4130 					unsigned int segno)
4131 {
4132 	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4133 }
4134 
4135 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4136 					unsigned int start)
4137 {
4138 	struct sit_info *sit_i = SIT_I(sbi);
4139 	struct page *page;
4140 	pgoff_t src_off, dst_off;
4141 
4142 	src_off = current_sit_addr(sbi, start);
4143 	dst_off = next_sit_addr(sbi, src_off);
4144 
4145 	page = f2fs_grab_meta_page(sbi, dst_off);
4146 	seg_info_to_sit_page(sbi, page, start);
4147 
4148 	set_page_dirty(page);
4149 	set_to_next_sit(sit_i, start);
4150 
4151 	return page;
4152 }
4153 
4154 static struct sit_entry_set *grab_sit_entry_set(void)
4155 {
4156 	struct sit_entry_set *ses =
4157 			f2fs_kmem_cache_alloc(sit_entry_set_slab,
4158 						GFP_NOFS, true, NULL);
4159 
4160 	ses->entry_cnt = 0;
4161 	INIT_LIST_HEAD(&ses->set_list);
4162 	return ses;
4163 }
4164 
4165 static void release_sit_entry_set(struct sit_entry_set *ses)
4166 {
4167 	list_del(&ses->set_list);
4168 	kmem_cache_free(sit_entry_set_slab, ses);
4169 }
4170 
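/*
 * Keep the set list sorted by entry_cnt in ascending order:
 * f2fs_flush_sit_entries() walks it in order, so the small sets are
 * absorbed by the journal while space lasts and only the larger ones
 * fall back to SIT pages.
 */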
4171 static void adjust_sit_entry_set(struct sit_entry_set *ses,
4172 						struct list_head *head)
4173 {
4174 	struct sit_entry_set *next = ses;
4175 
4176 	if (list_is_last(&ses->set_list, head))
4177 		return;
4178 
4179 	list_for_each_entry_continue(next, head, set_list)
4180 		if (ses->entry_cnt <= next->entry_cnt)
4181 			break;
4182 
4183 	list_move_tail(&ses->set_list, &next->set_list);
4184 }
4185 
4186 static void add_sit_entry(unsigned int segno, struct list_head *head)
4187 {
4188 	struct sit_entry_set *ses;
4189 	unsigned int start_segno = START_SEGNO(segno);
4190 
4191 	list_for_each_entry(ses, head, set_list) {
4192 		if (ses->start_segno == start_segno) {
4193 			ses->entry_cnt++;
4194 			adjust_sit_entry_set(ses, head);
4195 			return;
4196 		}
4197 	}
4198 
4199 	ses = grab_sit_entry_set();
4200 
4201 	ses->start_segno = start_segno;
4202 	ses->entry_cnt++;
4203 	list_add(&ses->set_list, head);
4204 }
4205 
4206 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4207 {
4208 	struct f2fs_sm_info *sm_info = SM_I(sbi);
4209 	struct list_head *set_list = &sm_info->sit_entry_set;
4210 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4211 	unsigned int segno;
4212 
4213 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4214 		add_sit_entry(segno, set_list);
4215 }
4216 
4217 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4218 {
4219 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4220 	struct f2fs_journal *journal = curseg->journal;
4221 	int i;
4222 
4223 	down_write(&curseg->journal_rwsem);
4224 	for (i = 0; i < sits_in_cursum(journal); i++) {
4225 		unsigned int segno;
4226 		bool dirtied;
4227 
4228 		segno = le32_to_cpu(segno_in_journal(journal, i));
4229 		dirtied = __mark_sit_entry_dirty(sbi, segno);
4230 
4231 		if (!dirtied)
4232 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4233 	}
4234 	update_sits_in_cursum(journal, -i);
4235 	up_write(&curseg->journal_rwsem);
4236 }
4237 
4238 /*
4239  * CP calls this function, which flushes SIT entries including sit_journal,
4240  * and moves prefree segs to free segs.
4241  */
4242 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4243 {
4244 	struct sit_info *sit_i = SIT_I(sbi);
4245 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4246 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4247 	struct f2fs_journal *journal = curseg->journal;
4248 	struct sit_entry_set *ses, *tmp;
4249 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
4250 	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4251 	struct seg_entry *se;
4252 
4253 	down_write(&sit_i->sentry_lock);
4254 
4255 	if (!sit_i->dirty_sentries)
4256 		goto out;
4257 
4258 	/*
4259 	 * temporarily add and account the sit entries of the dirty bitmap
4260 	 * in the sit entry sets
4261 	 */
4262 	add_sits_in_set(sbi);
4263 
4264 	/*
4265 	 * if there is not enough space in the journal to store dirty sit
4266 	 * entries, remove all entries from the journal and add and account
4267 	 * them in the sit entry sets.
4268 	 */
4269 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4270 								!to_journal)
4271 		remove_sits_in_journal(sbi);
4272 
4273 	/*
4274 	 * there are two steps to flush sit entries:
4275 	 * #1, flush sit entries to journal in current cold data summary block.
4276 	 * #2, flush sit entries to sit page.
4277 	 */
4278 	list_for_each_entry_safe(ses, tmp, head, set_list) {
4279 		struct page *page = NULL;
4280 		struct f2fs_sit_block *raw_sit = NULL;
4281 		unsigned int start_segno = ses->start_segno;
4282 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4283 						(unsigned long)MAIN_SEGS(sbi));
4284 		unsigned int segno = start_segno;
4285 
4286 		if (to_journal &&
4287 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4288 			to_journal = false;
4289 
4290 		if (to_journal) {
4291 			down_write(&curseg->journal_rwsem);
4292 		} else {
4293 			page = get_next_sit_page(sbi, start_segno);
4294 			raw_sit = page_address(page);
4295 		}
4296 
4297 		/* flush dirty sit entries in region of current sit set */
4298 		for_each_set_bit_from(segno, bitmap, end) {
4299 			int offset, sit_offset;
4300 
4301 			se = get_seg_entry(sbi, segno);
4302 #ifdef CONFIG_F2FS_CHECK_FS
4303 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4304 						SIT_VBLOCK_MAP_SIZE))
4305 				f2fs_bug_on(sbi, 1);
4306 #endif
4307 
4308 			/* add discard candidates */
4309 			if (!(cpc->reason & CP_DISCARD)) {
4310 				cpc->trim_start = segno;
4311 				add_discard_addrs(sbi, cpc, false);
4312 			}
4313 
4314 			if (to_journal) {
4315 				offset = f2fs_lookup_journal_in_cursum(journal,
4316 							SIT_JOURNAL, segno, 1);
4317 				f2fs_bug_on(sbi, offset < 0);
4318 				segno_in_journal(journal, offset) =
4319 							cpu_to_le32(segno);
4320 				seg_info_to_raw_sit(se,
4321 					&sit_in_journal(journal, offset));
4322 				check_block_count(sbi, segno,
4323 					&sit_in_journal(journal, offset));
4324 			} else {
4325 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4326 				seg_info_to_raw_sit(se,
4327 						&raw_sit->entries[sit_offset]);
4328 				check_block_count(sbi, segno,
4329 						&raw_sit->entries[sit_offset]);
4330 			}
4331 
4332 			__clear_bit(segno, bitmap);
4333 			sit_i->dirty_sentries--;
4334 			ses->entry_cnt--;
4335 		}
4336 
4337 		if (to_journal)
4338 			up_write(&curseg->journal_rwsem);
4339 		else
4340 			f2fs_put_page(page, 1);
4341 
4342 		f2fs_bug_on(sbi, ses->entry_cnt);
4343 		release_sit_entry_set(ses);
4344 	}
4345 
4346 	f2fs_bug_on(sbi, !list_empty(head));
4347 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
4348 out:
4349 	if (cpc->reason & CP_DISCARD) {
4350 		__u64 trim_start = cpc->trim_start;
4351 
4352 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4353 			add_discard_addrs(sbi, cpc, false);
4354 
4355 		cpc->trim_start = trim_start;
4356 	}
4357 	up_write(&sit_i->sentry_lock);
4358 
4359 	set_prefree_as_free_segments(sbi);
4360 }
4361 
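/*
 * All per-segment validity bitmaps (cur_valid_map, ckpt_valid_map, the
 * CONFIG_F2FS_CHECK_FS mirror, and optionally discard_map) are carved
 * out of the single sit_i->bitmap allocation, SIT_VBLOCK_MAP_SIZE bytes
 * apiece per segment.
 */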
4362 static int build_sit_info(struct f2fs_sb_info *sbi)
4363 {
4364 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4365 	struct sit_info *sit_i;
4366 	unsigned int sit_segs, start;
4367 	char *src_bitmap, *bitmap;
4368 	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4369 	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4370 
4371 	/* allocate memory for SIT information */
4372 	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4373 	if (!sit_i)
4374 		return -ENOMEM;
4375 
4376 	SM_I(sbi)->sit_info = sit_i;
4377 
4378 	sit_i->sentries =
4379 		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4380 					      MAIN_SEGS(sbi)),
4381 			      GFP_KERNEL);
4382 	if (!sit_i->sentries)
4383 		return -ENOMEM;
4384 
4385 	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4386 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4387 								GFP_KERNEL);
4388 	if (!sit_i->dirty_sentries_bitmap)
4389 		return -ENOMEM;
4390 
4391 #ifdef CONFIG_F2FS_CHECK_FS
4392 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4393 #else
4394 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4395 #endif
4396 	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4397 	if (!sit_i->bitmap)
4398 		return -ENOMEM;
4399 
4400 	bitmap = sit_i->bitmap;
4401 
4402 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4403 		sit_i->sentries[start].cur_valid_map = bitmap;
4404 		bitmap += SIT_VBLOCK_MAP_SIZE;
4405 
4406 		sit_i->sentries[start].ckpt_valid_map = bitmap;
4407 		bitmap += SIT_VBLOCK_MAP_SIZE;
4408 
4409 #ifdef CONFIG_F2FS_CHECK_FS
4410 		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4411 		bitmap += SIT_VBLOCK_MAP_SIZE;
4412 #endif
4413 
4414 		if (discard_map) {
4415 			sit_i->sentries[start].discard_map = bitmap;
4416 			bitmap += SIT_VBLOCK_MAP_SIZE;
4417 		}
4418 	}
4419 
4420 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4421 	if (!sit_i->tmp_map)
4422 		return -ENOMEM;
4423 
4424 	if (__is_large_section(sbi)) {
4425 		sit_i->sec_entries =
4426 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4427 						      MAIN_SECS(sbi)),
4428 				      GFP_KERNEL);
4429 		if (!sit_i->sec_entries)
4430 			return -ENOMEM;
4431 	}
4432 
4433 	/* get information related to SIT */
4434 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4435 
4436 	/* set up the SIT bitmap from the checkpoint pack */
4437 	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4438 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4439 
4440 	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4441 	if (!sit_i->sit_bitmap)
4442 		return -ENOMEM;
4443 
4444 #ifdef CONFIG_F2FS_CHECK_FS
4445 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4446 					sit_bitmap_size, GFP_KERNEL);
4447 	if (!sit_i->sit_bitmap_mir)
4448 		return -ENOMEM;
4449 
4450 	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4451 					main_bitmap_size, GFP_KERNEL);
4452 	if (!sit_i->invalid_segmap)
4453 		return -ENOMEM;
4454 #endif
4455 
4456 	/* init SIT information */
4457 	sit_i->s_ops = &default_salloc_ops;
4458 
4459 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4460 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4461 	sit_i->written_valid_blocks = 0;
4462 	sit_i->bitmap_size = sit_bitmap_size;
4463 	sit_i->dirty_sentries = 0;
4464 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4465 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4466 	sit_i->mounted_time = ktime_get_boottime_seconds();
4467 	init_rwsem(&sit_i->sentry_lock);
4468 	return 0;
4469 }
4470 
4471 static int build_free_segmap(struct f2fs_sb_info *sbi)
4472 {
4473 	struct free_segmap_info *free_i;
4474 	unsigned int bitmap_size, sec_bitmap_size;
4475 
4476 	/* allocate memory for free segmap information */
4477 	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4478 	if (!free_i)
4479 		return -ENOMEM;
4480 
4481 	SM_I(sbi)->free_info = free_i;
4482 
4483 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4484 	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4485 	if (!free_i->free_segmap)
4486 		return -ENOMEM;
4487 
4488 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4489 	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4490 	if (!free_i->free_secmap)
4491 		return -ENOMEM;
4492 
4493 	/* set all segments as dirty temporarily */
4494 	memset(free_i->free_segmap, 0xff, bitmap_size);
4495 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4496 
4497 	/* init free segmap information */
4498 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4499 	free_i->free_segments = 0;
4500 	free_i->free_sections = 0;
4501 	spin_lock_init(&free_i->segmap_lock);
4502 	return 0;
4503 }
4504 
4505 static int build_curseg(struct f2fs_sb_info *sbi)
4506 {
4507 	struct curseg_info *array;
4508 	int i;
4509 
4510 	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4511 					sizeof(*array)), GFP_KERNEL);
4512 	if (!array)
4513 		return -ENOMEM;
4514 
4515 	SM_I(sbi)->curseg_array = array;
4516 
4517 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4518 		mutex_init(&array[i].curseg_mutex);
4519 		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4520 		if (!array[i].sum_blk)
4521 			return -ENOMEM;
4522 		init_rwsem(&array[i].journal_rwsem);
4523 		array[i].journal = f2fs_kzalloc(sbi,
4524 				sizeof(struct f2fs_journal), GFP_KERNEL);
4525 		if (!array[i].journal)
4526 			return -ENOMEM;
4527 		if (i < NR_PERSISTENT_LOG)
4528 			array[i].seg_type = CURSEG_HOT_DATA + i;
4529 		else if (i == CURSEG_COLD_DATA_PINNED)
4530 			array[i].seg_type = CURSEG_COLD_DATA;
4531 		else if (i == CURSEG_ALL_DATA_ATGC)
4532 			array[i].seg_type = CURSEG_COLD_DATA;
4533 		array[i].segno = NULL_SEGNO;
4534 		array[i].next_blkoff = 0;
4535 		array[i].inited = false;
4536 	}
4537 	return restore_curseg_summaries(sbi);
4538 }
4539 
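/*
 * Read all SIT blocks (with readahead) to build the in-memory seg_entry
 * array, then overlay the newer entries cached in the SIT journal. Each
 * entry's block count is validated, and the total number of node blocks
 * is cross-checked against valid_node_count().
 */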
4540 static int build_sit_entries(struct f2fs_sb_info *sbi)
4541 {
4542 	struct sit_info *sit_i = SIT_I(sbi);
4543 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4544 	struct f2fs_journal *journal = curseg->journal;
4545 	struct seg_entry *se;
4546 	struct f2fs_sit_entry sit;
4547 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4548 	unsigned int i, start, end;
4549 	unsigned int readed, start_blk = 0;
4550 	int err = 0;
4551 	block_t total_node_blocks = 0;
4552 
4553 	do {
4554 		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4555 							META_SIT, true);
4556 
4557 		start = start_blk * sit_i->sents_per_block;
4558 		end = (start_blk + readed) * sit_i->sents_per_block;
4559 
4560 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
4561 			struct f2fs_sit_block *sit_blk;
4562 			struct page *page;
4563 
4564 			se = &sit_i->sentries[start];
4565 			page = get_current_sit_page(sbi, start);
4566 			if (IS_ERR(page))
4567 				return PTR_ERR(page);
4568 			sit_blk = (struct f2fs_sit_block *)page_address(page);
4569 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4570 			f2fs_put_page(page, 1);
4571 
4572 			err = check_block_count(sbi, start, &sit);
4573 			if (err)
4574 				return err;
4575 			seg_info_from_raw_sit(se, &sit);
4576 			if (IS_NODESEG(se->type))
4577 				total_node_blocks += se->valid_blocks;
4578 
4579 			if (f2fs_block_unit_discard(sbi)) {
4580 				/* build discard map only one time */
4581 				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4582 					memset(se->discard_map, 0xff,
4583 						SIT_VBLOCK_MAP_SIZE);
4584 				} else {
4585 					memcpy(se->discard_map,
4586 						se->cur_valid_map,
4587 						SIT_VBLOCK_MAP_SIZE);
4588 					sbi->discard_blks +=
4589 						sbi->blocks_per_seg -
4590 						se->valid_blocks;
4591 				}
4592 			}
4593 
4594 			if (__is_large_section(sbi))
4595 				get_sec_entry(sbi, start)->valid_blocks +=
4596 							se->valid_blocks;
4597 		}
4598 		start_blk += readed;
4599 	} while (start_blk < sit_blk_cnt);
4600 
4601 	down_read(&curseg->journal_rwsem);
4602 	for (i = 0; i < sits_in_cursum(journal); i++) {
4603 		unsigned int old_valid_blocks;
4604 
4605 		start = le32_to_cpu(segno_in_journal(journal, i));
4606 		if (start >= MAIN_SEGS(sbi)) {
4607 			f2fs_err(sbi, "Wrong journal entry on segno %u",
4608 				 start);
4609 			err = -EFSCORRUPTED;
4610 			break;
4611 		}
4612 
4613 		se = &sit_i->sentries[start];
4614 		sit = sit_in_journal(journal, i);
4615 
4616 		old_valid_blocks = se->valid_blocks;
4617 		if (IS_NODESEG(se->type))
4618 			total_node_blocks -= old_valid_blocks;
4619 
4620 		err = check_block_count(sbi, start, &sit);
4621 		if (err)
4622 			break;
4623 		seg_info_from_raw_sit(se, &sit);
4624 		if (IS_NODESEG(se->type))
4625 			total_node_blocks += se->valid_blocks;
4626 
4627 		if (f2fs_block_unit_discard(sbi)) {
4628 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4629 				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4630 			} else {
4631 				memcpy(se->discard_map, se->cur_valid_map,
4632 							SIT_VBLOCK_MAP_SIZE);
4633 				sbi->discard_blks += old_valid_blocks;
4634 				sbi->discard_blks -= se->valid_blocks;
4635 			}
4636 		}
4637 
4638 		if (__is_large_section(sbi)) {
4639 			get_sec_entry(sbi, start)->valid_blocks +=
4640 							se->valid_blocks;
4641 			get_sec_entry(sbi, start)->valid_blocks -=
4642 							old_valid_blocks;
4643 		}
4644 	}
4645 	up_read(&curseg->journal_rwsem);
4646 
4647 	if (!err && total_node_blocks != valid_node_count(sbi)) {
4648 		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4649 			 total_node_blocks, valid_node_count(sbi));
4650 		err = -EFSCORRUPTED;
4651 	}
4652 
4653 	return err;
4654 }
4655 
4656 static void init_free_segmap(struct f2fs_sb_info *sbi)
4657 {
4658 	unsigned int start;
4659 	int type;
4660 	struct seg_entry *sentry;
4661 
4662 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4663 		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4664 			continue;
4665 		sentry = get_seg_entry(sbi, start);
4666 		if (!sentry->valid_blocks)
4667 			__set_free(sbi, start);
4668 		else
4669 			SIT_I(sbi)->written_valid_blocks +=
4670 						sentry->valid_blocks;
4671 	}
4672 
4673 	/* mark the current segments as in use */
4674 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4675 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4676 
4677 		__set_test_and_inuse(sbi, curseg_t->segno);
4678 	}
4679 }
4680 
4681 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4682 {
4683 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4684 	struct free_segmap_info *free_i = FREE_I(sbi);
4685 	unsigned int segno = 0, offset = 0, secno;
4686 	block_t valid_blocks, usable_blks_in_seg;
4687 	block_t blks_per_sec = BLKS_PER_SEC(sbi);
4688 
4689 	while (1) {
4690 		/* find dirty segment based on free segmap */
4691 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4692 		if (segno >= MAIN_SEGS(sbi))
4693 			break;
4694 		offset = segno + 1;
4695 		valid_blocks = get_valid_blocks(sbi, segno, false);
4696 		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4697 		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4698 			continue;
4699 		if (valid_blocks > usable_blks_in_seg) {
4700 			f2fs_bug_on(sbi, 1);
4701 			continue;
4702 		}
4703 		mutex_lock(&dirty_i->seglist_lock);
4704 		__locate_dirty_segment(sbi, segno, DIRTY);
4705 		mutex_unlock(&dirty_i->seglist_lock);
4706 	}
4707 
4708 	if (!__is_large_section(sbi))
4709 		return;
4710 
4711 	mutex_lock(&dirty_i->seglist_lock);
4712 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4713 		valid_blocks = get_valid_blocks(sbi, segno, true);
4714 		secno = GET_SEC_FROM_SEG(sbi, segno);
4715 
4716 		if (!valid_blocks || valid_blocks == blks_per_sec)
4717 			continue;
4718 		if (IS_CURSEC(sbi, secno))
4719 			continue;
4720 		set_bit(secno, dirty_i->dirty_secmap);
4721 	}
4722 	mutex_unlock(&dirty_i->seglist_lock);
4723 }
4724 
4725 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4726 {
4727 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4728 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4729 
4730 	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4731 	if (!dirty_i->victim_secmap)
4732 		return -ENOMEM;
4733 	return 0;
4734 }
4735 
4736 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4737 {
4738 	struct dirty_seglist_info *dirty_i;
4739 	unsigned int bitmap_size, i;
4740 
4741 	/* allocate memory for dirty segments list information */
4742 	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4743 								GFP_KERNEL);
4744 	if (!dirty_i)
4745 		return -ENOMEM;
4746 
4747 	SM_I(sbi)->dirty_info = dirty_i;
4748 	mutex_init(&dirty_i->seglist_lock);
4749 
4750 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4751 
4752 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
4753 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4754 								GFP_KERNEL);
4755 		if (!dirty_i->dirty_segmap[i])
4756 			return -ENOMEM;
4757 	}
4758 
4759 	if (__is_large_section(sbi)) {
4760 		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4761 		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4762 						bitmap_size, GFP_KERNEL);
4763 		if (!dirty_i->dirty_secmap)
4764 			return -ENOMEM;
4765 	}
4766 
4767 	init_dirty_segmap(sbi);
4768 	return init_victim_secmap(sbi);
4769 }
4770 
4771 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4772 {
4773 	int i;
4774 
4775 	/*
4776 	 * In an LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4777 	 * in an LFS curseg, all blkaddrs after .next_blkoff should be unused.
4778 	 */
4779 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4780 		struct curseg_info *curseg = CURSEG_I(sbi, i);
4781 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4782 		unsigned int blkofs = curseg->next_blkoff;
4783 
4784 		if (f2fs_sb_has_readonly(sbi) &&
4785 			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
4786 			continue;
4787 
4788 		sanity_check_seg_type(sbi, curseg->seg_type);
4789 
4790 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
4791 			goto out;
4792 
4793 		if (curseg->alloc_type == SSR)
4794 			continue;
4795 
4796 		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4797 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4798 				continue;
4799 out:
4800 			f2fs_err(sbi,
4801 				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4802 				 i, curseg->segno, curseg->alloc_type,
4803 				 curseg->next_blkoff, blkofs);
4804 			return -EFSCORRUPTED;
4805 		}
4806 	}
4807 	return 0;
4808 }
4809 
4810 #ifdef CONFIG_BLK_DEV_ZONED
4811 
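/*
 * Compare the last valid block of a sequential zone with the device
 * write pointer. A write pointer below valid data is only reported,
 * since the zone cannot be selected for writing before being discarded;
 * a non-zero write pointer in a zone without valid blocks is fixed up
 * by resetting the zone.
 */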
4812 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4813 				    struct f2fs_dev_info *fdev,
4814 				    struct blk_zone *zone)
4815 {
4816 	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4817 	block_t zone_block, wp_block, last_valid_block;
4818 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4819 	int i, s, b, ret;
4820 	struct seg_entry *se;
4821 
4822 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4823 		return 0;
4824 
4825 	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4826 	wp_segno = GET_SEGNO(sbi, wp_block);
4827 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4828 	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4829 	zone_segno = GET_SEGNO(sbi, zone_block);
4830 	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4831 
4832 	if (zone_segno >= MAIN_SEGS(sbi))
4833 		return 0;
4834 
4835 	/*
4836 	 * Skip checking the zones that cursegs point to, since
4837 	 * fix_curseg_write_pointer() checks them.
4838 	 */
4839 	for (i = 0; i < NO_CHECK_TYPE; i++)
4840 		if (zone_secno == GET_SEC_FROM_SEG(sbi,
4841 						   CURSEG_I(sbi, i)->segno))
4842 			return 0;
4843 
4844 	/*
4845 	 * Get the last valid block of the zone.
4846 	 */
4847 	last_valid_block = zone_block - 1;
4848 	for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4849 		segno = zone_segno + s;
4850 		se = get_seg_entry(sbi, segno);
4851 		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4852 			if (f2fs_test_bit(b, se->cur_valid_map)) {
4853 				last_valid_block = START_BLOCK(sbi, segno) + b;
4854 				break;
4855 			}
4856 		if (last_valid_block >= zone_block)
4857 			break;
4858 	}
4859 
4860 	/*
4861 	 * If the last valid block is beyond the write pointer, report the
4862 	 * inconsistency. It does not cause write errors because the zone
4863 	 * will not be selected for write operations until it gets
4864 	 * discarded. Just report it.
4865 	 */
4866 	if (last_valid_block >= wp_block) {
4867 		f2fs_notice(sbi, "Valid block beyond write pointer: "
4868 			    "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
4869 			    GET_SEGNO(sbi, last_valid_block),
4870 			    GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4871 			    wp_segno, wp_blkoff);
4872 		return 0;
4873 	}
4874 
4875 	/*
4876 	 * If there is no valid block in the zone and the write pointer is
4877 	 * not at the zone start, reset the write pointer.
4878 	 */
4879 	if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
4880 		f2fs_notice(sbi,
4881 			    "Zone without valid block has non-zero write "
4882 			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4883 			    wp_segno, wp_blkoff);
4884 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4885 					zone->len >> log_sectors_per_block);
4886 		if (ret) {
4887 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4888 				 fdev->path, ret);
4889 			return ret;
4890 		}
4891 	}
4892 
4893 	return 0;
4894 }
4895 
4896 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4897 						  block_t zone_blkaddr)
4898 {
4899 	int i;
4900 
4901 	for (i = 0; i < sbi->s_ndevs; i++) {
4902 		if (!bdev_is_zoned(FDEV(i).bdev))
4903 			continue;
4904 		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4905 				zone_blkaddr <= FDEV(i).end_blk))
4906 			return &FDEV(i);
4907 	}
4908 
4909 	return NULL;
4910 }
4911 
4912 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4913 			      void *data)
4914 {
4915 	memcpy(data, zone, sizeof(struct blk_zone));
4916 	return 0;
4917 }
4918 
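/*
 * Make a curseg on a zoned device agree with the zone write pointer.
 * If they are unaligned, move the curseg to a freshly allocated section
 * and, should the newly assigned zone turn out not to be empty, reset
 * that zone as well.
 */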
4919 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4920 {
4921 	struct curseg_info *cs = CURSEG_I(sbi, type);
4922 	struct f2fs_dev_info *zbd;
4923 	struct blk_zone zone;
4924 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4925 	block_t cs_zone_block, wp_block;
4926 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4927 	sector_t zone_sector;
4928 	int err;
4929 
4930 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4931 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4932 
4933 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4934 	if (!zbd)
4935 		return 0;
4936 
4937 	/* report zone for the sector the curseg points to */
4938 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4939 		<< log_sectors_per_block;
4940 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4941 				  report_one_zone_cb, &zone);
4942 	if (err != 1) {
4943 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4944 			 zbd->path, err);
4945 		return err;
4946 	}
4947 
4948 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4949 		return 0;
4950 
4951 	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4952 	wp_segno = GET_SEGNO(sbi, wp_block);
4953 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4954 	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4955 
4956 	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4957 		wp_sector_off == 0)
4958 		return 0;
4959 
4960 	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
4961 		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
4962 		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
4963 
4964 	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
4965 		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
4966 
4967 	f2fs_allocate_new_section(sbi, type, true);
4968 
4969 	/* check consistency of the zone the curseg previously pointed to */
4970 	if (check_zone_write_pointer(sbi, zbd, &zone))
4971 		return -EIO;
4972 
4973 	/* check newly assigned zone */
4974 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4975 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4976 
4977 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4978 	if (!zbd)
4979 		return 0;
4980 
4981 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4982 		<< log_sectors_per_block;
4983 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4984 				  report_one_zone_cb, &zone);
4985 	if (err != 1) {
4986 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4987 			 zbd->path, err);
4988 		return err;
4989 	}
4990 
4991 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4992 		return 0;
4993 
4994 	if (zone.wp != zone.start) {
4995 		f2fs_notice(sbi,
4996 			    "New zone for curseg[%d] is not yet discarded. "
4997 			    "Reset the zone: curseg[0x%x,0x%x]",
4998 			    type, cs->segno, cs->next_blkoff);
4999 		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
5000 				zone_sector >> log_sectors_per_block,
5001 				zone.len >> log_sectors_per_block);
5002 		if (err) {
5003 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5004 				 zbd->path, err);
5005 			return err;
5006 		}
5007 	}
5008 
5009 	return 0;
5010 }
5011 
5012 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5013 {
5014 	int i, ret;
5015 
5016 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
5017 		ret = fix_curseg_write_pointer(sbi, i);
5018 		if (ret)
5019 			return ret;
5020 	}
5021 
5022 	return 0;
5023 }
5024 
5025 struct check_zone_write_pointer_args {
5026 	struct f2fs_sb_info *sbi;
5027 	struct f2fs_dev_info *fdev;
5028 };
5029 
5030 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
5031 				      void *data)
5032 {
5033 	struct check_zone_write_pointer_args *args;
5034 
5035 	args = (struct check_zone_write_pointer_args *)data;
5036 
5037 	return check_zone_write_pointer(args->sbi, args->fdev, zone);
5038 }
5039 
5040 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5041 {
5042 	int i, ret;
5043 	struct check_zone_write_pointer_args args;
5044 
5045 	for (i = 0; i < sbi->s_ndevs; i++) {
5046 		if (!bdev_is_zoned(FDEV(i).bdev))
5047 			continue;
5048 
5049 		args.sbi = sbi;
5050 		args.fdev = &FDEV(i);
5051 		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
5052 					  check_zone_write_pointer_cb, &args);
5053 		if (ret < 0)
5054 			return ret;
5055 	}
5056 
5057 	return 0;
5058 }
5059 
5060 static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
5061 						unsigned int dev_idx)
5062 {
5063 	if (!bdev_is_zoned(FDEV(dev_idx).bdev))
5064 		return true;
5065 	return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
5066 }
5067 
5068 /* Return the zone index in the given device */
5069 static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
5070 					int dev_idx)
5071 {
5072 	block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5073 
5074 	return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
5075 						sbi->log_blocks_per_blkz;
5076 }
5077 
5078 /*
5079  * Return the number of usable segments in a section, based on the
5080  * corresponding zone's capacity. A zone is equal to a section.
5081  */
5082 static inline unsigned int f2fs_usable_zone_segs_in_sec(
5083 		struct f2fs_sb_info *sbi, unsigned int segno)
5084 {
5085 	unsigned int dev_idx, zone_idx, unusable_segs_in_sec;
5086 
5087 	dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
5088 	zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
5089 
5090 	/* A conventional zone's capacity is always equal to the zone size */
5091 	if (is_conv_zone(sbi, zone_idx, dev_idx))
5092 		return sbi->segs_per_sec;
5093 
5094 	/*
5095 	 * If the zone_capacity_blocks array is NULL, then zone capacity
5096 	 * is equal to the zone size for all zones
5097 	 */
5098 	if (!FDEV(dev_idx).zone_capacity_blocks)
5099 		return sbi->segs_per_sec;
5100 
5101 	/* Get the segment count beyond zone capacity block */
5102 	unusable_segs_in_sec = (sbi->blocks_per_blkz -
5103 				FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
5104 				sbi->log_blocks_per_seg;
5105 	return sbi->segs_per_sec - unusable_segs_in_sec;
5106 }
5107 
5108 /*
5109  * Return the number of usable blocks in a segment. The number of blocks
5110  * returned is always equal to the number of blocks in a segment for
5111  * segments fully contained within a sequential zone capacity or a
5112  * conventional zone. For segments partially contained in a sequential
5113  * zone capacity, the number of usable blocks up to the zone capacity
5114  * is returned. 0 is returned in all other cases.
5115  */
5116 static inline unsigned int f2fs_usable_zone_blks_in_seg(
5117 			struct f2fs_sb_info *sbi, unsigned int segno)
5118 {
5119 	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
5120 	unsigned int zone_idx, dev_idx, secno;
5121 
5122 	secno = GET_SEC_FROM_SEG(sbi, segno);
5123 	seg_start = START_BLOCK(sbi, segno);
5124 	dev_idx = f2fs_target_device_index(sbi, seg_start);
5125 	zone_idx = get_zone_idx(sbi, secno, dev_idx);
5126 
5127 	/*
5128 	 * A conventional zone's capacity is always equal to its size,
5129 	 * so the number of blocks per segment is unchanged.
5130 	 */
5131 	if (is_conv_zone(sbi, zone_idx, dev_idx))
5132 		return sbi->blocks_per_seg;
5133 
5134 	if (!FDEV(dev_idx).zone_capacity_blocks)
5135 		return sbi->blocks_per_seg;
5136 
5137 	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5138 	sec_cap_blkaddr = sec_start_blkaddr +
5139 				FDEV(dev_idx).zone_capacity_blocks[zone_idx];
5140 
5141 	/*
5142 	 * If segment starts before zone capacity and spans beyond
5143 	 * zone capacity, then usable blocks are from seg start to
5144 	 * zone capacity. If the segment starts after the zone capacity,
5145 	 * then there are no usable blocks.
5146 	 */
5147 	if (seg_start >= sec_cap_blkaddr)
5148 		return 0;
5149 	if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
5150 		return sec_cap_blkaddr - seg_start;
5151 
5152 	return sbi->blocks_per_seg;
5153 }
5154 #else
5155 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5156 {
5157 	return 0;
5158 }
5159 
5160 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5161 {
5162 	return 0;
5163 }
5164 
5165 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5166 							unsigned int segno)
5167 {
5168 	return 0;
5169 }
5170 
5171 static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
5172 							unsigned int segno)
5173 {
5174 	return 0;
5175 }
5176 #endif
5177 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5178 					unsigned int segno)
5179 {
5180 	if (f2fs_sb_has_blkzoned(sbi))
5181 		return f2fs_usable_zone_blks_in_seg(sbi, segno);
5182 
5183 	return sbi->blocks_per_seg;
5184 }
5185 
5186 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5187 					unsigned int segno)
5188 {
5189 	if (f2fs_sb_has_blkzoned(sbi))
5190 		return f2fs_usable_zone_segs_in_sec(sbi, segno);
5191 
5192 	return sbi->segs_per_sec;
5193 }
5194 
5195 /*
5196  * Update min, max modified time for cost-benefit GC algorithm
5197  */
5198 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5199 {
5200 	struct sit_info *sit_i = SIT_I(sbi);
5201 	unsigned int segno;
5202 
5203 	down_write(&sit_i->sentry_lock);
5204 
5205 	sit_i->min_mtime = ULLONG_MAX;
5206 
5207 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
5208 		unsigned int i;
5209 		unsigned long long mtime = 0;
5210 
5211 		for (i = 0; i < sbi->segs_per_sec; i++)
5212 			mtime += get_seg_entry(sbi, segno + i)->mtime;
5213 
5214 		mtime = div_u64(mtime, sbi->segs_per_sec);
5215 
5216 		if (sit_i->min_mtime > mtime)
5217 			sit_i->min_mtime = mtime;
5218 	}
5219 	sit_i->max_mtime = get_mtime(sbi, false);
5220 	sit_i->dirty_max_mtime = 0;
5221 	up_write(&sit_i->sentry_lock);
5222 }
5223 
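/*
 * Build all segment manager structures in dependency order: flush and
 * discard controls first, then SIT info, the free segmap and cursegs;
 * after that, the on-disk SIT entries refine the free/dirty state and
 * the curseg layout is sanity-checked.
 */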
5224 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5225 {
5226 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5227 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5228 	struct f2fs_sm_info *sm_info;
5229 	int err;
5230 
5231 	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5232 	if (!sm_info)
5233 		return -ENOMEM;
5234 
5235 	/* init sm info */
5236 	sbi->sm_info = sm_info;
5237 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5238 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5239 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5240 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5241 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5242 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5243 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
5244 	sm_info->rec_prefree_segments = sm_info->main_segments *
5245 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
5246 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
5247 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
5248 
5249 	if (!f2fs_lfs_mode(sbi))
5250 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
5251 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5252 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5253 	sm_info->min_seq_blocks = sbi->blocks_per_seg;
5254 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5255 	sm_info->min_ssr_sections = reserved_sections(sbi);
5256 
5257 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
5258 
5259 	init_rwsem(&sm_info->curseg_lock);
5260 
5261 	if (!f2fs_readonly(sbi->sb)) {
5262 		err = f2fs_create_flush_cmd_control(sbi);
5263 		if (err)
5264 			return err;
5265 	}
5266 
5267 	err = create_discard_cmd_control(sbi);
5268 	if (err)
5269 		return err;
5270 
5271 	err = build_sit_info(sbi);
5272 	if (err)
5273 		return err;
5274 	err = build_free_segmap(sbi);
5275 	if (err)
5276 		return err;
5277 	err = build_curseg(sbi);
5278 	if (err)
5279 		return err;
5280 
5281 	/* reinit free segmap based on SIT */
5282 	err = build_sit_entries(sbi);
5283 	if (err)
5284 		return err;
5285 
5286 	init_free_segmap(sbi);
5287 	err = build_dirty_segmap(sbi);
5288 	if (err)
5289 		return err;
5290 
5291 	err = sanity_check_curseg(sbi);
5292 	if (err)
5293 		return err;
5294 
5295 	init_min_max_mtime(sbi);
5296 	return 0;
5297 }
5298 
5299 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5300 		enum dirty_type dirty_type)
5301 {
5302 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5303 
5304 	mutex_lock(&dirty_i->seglist_lock);
5305 	kvfree(dirty_i->dirty_segmap[dirty_type]);
5306 	dirty_i->nr_dirty[dirty_type] = 0;
5307 	mutex_unlock(&dirty_i->seglist_lock);
5308 }
5309 
5310 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5311 {
5312 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5313 
5314 	kvfree(dirty_i->victim_secmap);
5315 }
5316 
5317 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5318 {
5319 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5320 	int i;
5321 
5322 	if (!dirty_i)
5323 		return;
5324 
5325 	/* discard pre-free/dirty segments list */
5326 	for (i = 0; i < NR_DIRTY_TYPE; i++)
5327 		discard_dirty_segmap(sbi, i);
5328 
5329 	if (__is_large_section(sbi)) {
5330 		mutex_lock(&dirty_i->seglist_lock);
5331 		kvfree(dirty_i->dirty_secmap);
5332 		mutex_unlock(&dirty_i->seglist_lock);
5333 	}
5334 
5335 	destroy_victim_secmap(sbi);
5336 	SM_I(sbi)->dirty_info = NULL;
5337 	kfree(dirty_i);
5338 }
5339 
5340 static void destroy_curseg(struct f2fs_sb_info *sbi)
5341 {
5342 	struct curseg_info *array = SM_I(sbi)->curseg_array;
5343 	int i;
5344 
5345 	if (!array)
5346 		return;
5347 	SM_I(sbi)->curseg_array = NULL;
5348 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
5349 		kfree(array[i].sum_blk);
5350 		kfree(array[i].journal);
5351 	}
5352 	kfree(array);
5353 }
5354 
5355 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5356 {
5357 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5358 
5359 	if (!free_i)
5360 		return;
5361 	SM_I(sbi)->free_info = NULL;
5362 	kvfree(free_i->free_segmap);
5363 	kvfree(free_i->free_secmap);
5364 	kfree(free_i);
5365 }
5366 
5367 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5368 {
5369 	struct sit_info *sit_i = SIT_I(sbi);
5370 
5371 	if (!sit_i)
5372 		return;
5373 
5374 	if (sit_i->sentries)
5375 		kvfree(sit_i->bitmap);
5376 	kfree(sit_i->tmp_map);
5377 
5378 	kvfree(sit_i->sentries);
5379 	kvfree(sit_i->sec_entries);
5380 	kvfree(sit_i->dirty_sentries_bitmap);
5381 
5382 	SM_I(sbi)->sit_info = NULL;
5383 	kvfree(sit_i->sit_bitmap);
5384 #ifdef CONFIG_F2FS_CHECK_FS
5385 	kvfree(sit_i->sit_bitmap_mir);
5386 	kvfree(sit_i->invalid_segmap);
5387 #endif
5388 	kfree(sit_i);
5389 }
5390 
5391 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5392 {
5393 	struct f2fs_sm_info *sm_info = SM_I(sbi);
5394 
5395 	if (!sm_info)
5396 		return;
5397 	f2fs_destroy_flush_cmd_control(sbi, true);
5398 	destroy_discard_cmd_control(sbi);
5399 	destroy_dirty_segmap(sbi);
5400 	destroy_curseg(sbi);
5401 	destroy_free_segmap(sbi);
5402 	destroy_sit_info(sbi);
5403 	sbi->sm_info = NULL;
5404 	kfree(sm_info);
5405 }
5406 
5407 int __init f2fs_create_segment_manager_caches(void)
5408 {
5409 	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5410 			sizeof(struct discard_entry));
5411 	if (!discard_entry_slab)
5412 		goto fail;
5413 
5414 	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5415 			sizeof(struct discard_cmd));
5416 	if (!discard_cmd_slab)
5417 		goto destroy_discard_entry;
5418 
5419 	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5420 			sizeof(struct sit_entry_set));
5421 	if (!sit_entry_set_slab)
5422 		goto destroy_discard_cmd;
5423 
5424 	inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
5425 			sizeof(struct inmem_pages));
5426 	if (!inmem_entry_slab)
5427 		goto destroy_sit_entry_set;
5428 	return 0;
5429 
5430 destroy_sit_entry_set:
5431 	kmem_cache_destroy(sit_entry_set_slab);
5432 destroy_discard_cmd:
5433 	kmem_cache_destroy(discard_cmd_slab);
5434 destroy_discard_entry:
5435 	kmem_cache_destroy(discard_entry_slab);
5436 fail:
5437 	return -ENOMEM;
5438 }
5439 
5440 void f2fs_destroy_segment_manager_caches(void)
5441 {
5442 	kmem_cache_destroy(sit_entry_set_slab);
5443 	kmem_cache_destroy(discard_cmd_slab);
5444 	kmem_cache_destroy(discard_entry_slab);
5445 	kmem_cache_destroy(inmem_entry_slab);
5446 }
5447