xref: /linux/fs/f2fs/segment.c (revision 81d8e5e2132215d21f2cddffcd2b16d08c0389fa)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/segment.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/sched/mm.h>
13 #include <linux/prefetch.h>
14 #include <linux/kthread.h>
15 #include <linux/swap.h>
16 #include <linux/timer.h>
17 #include <linux/freezer.h>
18 #include <linux/sched/signal.h>
19 #include <linux/random.h>
20 
21 #include "f2fs.h"
22 #include "segment.h"
23 #include "node.h"
24 #include "gc.h"
25 #include "iostat.h"
26 #include <trace/events/f2fs.h>
27 
28 #define __reverse_ffz(x) __reverse_ffs(~(x))
29 
30 static struct kmem_cache *discard_entry_slab;
31 static struct kmem_cache *discard_cmd_slab;
32 static struct kmem_cache *sit_entry_set_slab;
33 static struct kmem_cache *revoke_entry_slab;
34 
35 static unsigned long __reverse_ulong(unsigned char *str)
36 {
37 	unsigned long tmp = 0;
38 	int shift = 24, idx = 0;
39 
40 #if BITS_PER_LONG == 64
41 	shift = 56;
42 #endif
43 	while (shift >= 0) {
44 		tmp |= (unsigned long)str[idx++] << shift;
45 		shift -= BITS_PER_BYTE;
46 	}
47 	return tmp;
48 }
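
/*
 * Worked example (illustrative): on a 64-bit build, __reverse_ulong()
 * loads the eight bitmap bytes MSB-first, i.e. as a big-endian word:
 *
 *   str[] = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *   tmp   = 0x8000000000000001UL
 *
 * Byte 0 of the on-disk bitmap lands in the most significant byte of
 * the word, which is the layout the reversed-bit helpers below expect.
 */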
49 
50 /*
51  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
52  * MSB and LSB are reversed in a byte by f2fs_set_bit.
53  */
54 static inline unsigned long __reverse_ffs(unsigned long word)
55 {
56 	int num = 0;
57 
58 #if BITS_PER_LONG == 64
59 	if ((word & 0xffffffff00000000UL) == 0)
60 		num += 32;
61 	else
62 		word >>= 32;
63 #endif
64 	if ((word & 0xffff0000) == 0)
65 		num += 16;
66 	else
67 		word >>= 16;
68 
69 	if ((word & 0xff00) == 0)
70 		num += 8;
71 	else
72 		word >>= 8;
73 
74 	if ((word & 0xf0) == 0)
75 		num += 4;
76 	else
77 		word >>= 4;
78 
79 	if ((word & 0xc) == 0)
80 		num += 2;
81 	else
82 		word >>= 2;
83 
84 	if ((word & 0x2) == 0)
85 		num += 1;
86 	return num;
87 }
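
/*
 * For intuition, two sanity examples that follow directly from the
 * cascade above:
 *
 *   __reverse_ffs(1UL << (BITS_PER_LONG - 1)) == 0
 *   __reverse_ffs(1UL)                        == BITS_PER_LONG - 1
 *
 * i.e. bit positions are counted from the most significant bit down,
 * matching the f2fs_set_bit() numbering described below.
 */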
88 
89 /*
90  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
91  * f2fs_set_bit makes MSB and LSB reversed in a byte.
 92  * @size must be an integral multiple of BITS_PER_LONG.
93  * Example:
94  *                             MSB <--> LSB
95  *   f2fs_set_bit(0, bitmap) => 1000 0000
96  *   f2fs_set_bit(7, bitmap) => 0000 0001
97  */
98 static unsigned long __find_rev_next_bit(const unsigned long *addr,
99 			unsigned long size, unsigned long offset)
100 {
101 	const unsigned long *p = addr + BIT_WORD(offset);
102 	unsigned long result = size;
103 	unsigned long tmp;
104 
105 	if (offset >= size)
106 		return size;
107 
108 	size -= (offset & ~(BITS_PER_LONG - 1));
109 	offset %= BITS_PER_LONG;
110 
111 	while (1) {
112 		if (*p == 0)
113 			goto pass;
114 
115 		tmp = __reverse_ulong((unsigned char *)p);
116 
117 		tmp &= ~0UL >> offset;
118 		if (size < BITS_PER_LONG)
119 			tmp &= (~0UL << (BITS_PER_LONG - size));
120 		if (tmp)
121 			goto found;
122 pass:
123 		if (size <= BITS_PER_LONG)
124 			break;
125 		size -= BITS_PER_LONG;
126 		offset = 0;
127 		p++;
128 	}
129 	return result;
130 found:
131 	return result - size + __reverse_ffs(tmp);
132 }
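
/*
 * A minimal worked example of the reversed bit order: after
 * f2fs_set_bit(5, bitmap), byte 0 of the bitmap is 0000 0100.
 * __reverse_ulong() moves that byte into the top bits of the word,
 * so tmp == 0x0400000000000000 on 64-bit, and
 *
 *   __find_rev_next_bit(bitmap, 64, 0) == 5
 *
 * since __reverse_ffs(tmp) counts down from the MSB.
 */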
133 
134 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
135 			unsigned long size, unsigned long offset)
136 {
137 	const unsigned long *p = addr + BIT_WORD(offset);
138 	unsigned long result = size;
139 	unsigned long tmp;
140 
141 	if (offset >= size)
142 		return size;
143 
144 	size -= (offset & ~(BITS_PER_LONG - 1));
145 	offset %= BITS_PER_LONG;
146 
147 	while (1) {
148 		if (*p == ~0UL)
149 			goto pass;
150 
151 		tmp = __reverse_ulong((unsigned char *)p);
152 
153 		if (offset)
154 			tmp |= ~0UL << (BITS_PER_LONG - offset);
155 		if (size < BITS_PER_LONG)
156 			tmp |= ~0UL >> size;
157 		if (tmp != ~0UL)
158 			goto found;
159 pass:
160 		if (size <= BITS_PER_LONG)
161 			break;
162 		size -= BITS_PER_LONG;
163 		offset = 0;
164 		p++;
165 	}
166 	return result;
167 found:
168 	return result - size + __reverse_ffz(tmp);
169 }
170 
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
172 {
173 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
174 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
175 	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
176 
177 	if (f2fs_lfs_mode(sbi))
178 		return false;
179 	if (sbi->gc_mode == GC_URGENT_HIGH)
180 		return true;
181 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
182 		return true;
183 
184 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
185 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
186 }
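
/*
 * A worked example with assumed numbers (none taken from this file):
 * with node_secs = 3, dent_secs = 2, imeta_secs = 1,
 * min_ssr_sections = 0 and reserved_sections(sbi) = 2, the threshold
 * is 3 + 2*2 + 1 + 0 + 2 = 10, so SSR kicks in once free_sections(sbi)
 * drops to 10 or below. Note that the formula weights dent_secs twice.
 */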
187 
188 void f2fs_abort_atomic_write(struct inode *inode, bool clean)
189 {
190 	struct f2fs_inode_info *fi = F2FS_I(inode);
191 
192 	if (!f2fs_is_atomic_file(inode))
193 		return;
194 
195 	if (clean)
196 		truncate_inode_pages_final(inode->i_mapping);
197 
198 	release_atomic_write_cnt(inode);
199 	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
200 	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
201 	clear_inode_flag(inode, FI_ATOMIC_FILE);
202 	if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
203 		clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
204 		/*
205 		 * The vfs inode stays clean during commit, but the f2fs inode
206 		 * doesn't. So clear the dirty state after commit and let
207 		 * f2fs_mark_inode_dirty_sync ensure a consistent dirty state.
208 		 */
209 		f2fs_inode_synced(inode);
210 		f2fs_mark_inode_dirty_sync(inode, true);
211 	}
212 	stat_dec_atomic_inode(inode);
213 
214 	F2FS_I(inode)->atomic_write_task = NULL;
215 
216 	if (clean) {
217 		f2fs_i_size_write(inode, fi->original_i_size);
218 		fi->original_i_size = 0;
219 	}
220 	/* avoid stale dirty inode during eviction */
221 	sync_inode_metadata(inode, 0);
222 }
223 
224 static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
225 			block_t new_addr, block_t *old_addr, bool recover)
226 {
227 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
228 	struct dnode_of_data dn;
229 	struct node_info ni;
230 	int err;
231 
232 retry:
233 	set_new_dnode(&dn, inode, NULL, NULL, 0);
234 	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
235 	if (err) {
236 		if (err == -ENOMEM) {
237 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
238 			goto retry;
239 		}
240 		return err;
241 	}
242 
243 	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
244 	if (err) {
245 		f2fs_put_dnode(&dn);
246 		return err;
247 	}
248 
249 	if (recover) {
250 		/* dn.data_blkaddr is always valid */
251 		if (!__is_valid_data_blkaddr(new_addr)) {
252 			if (new_addr == NULL_ADDR)
253 				dec_valid_block_count(sbi, inode, 1);
254 			f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1);
255 			f2fs_update_data_blkaddr(&dn, new_addr);
256 		} else {
257 			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
258 				new_addr, ni.version, true, true);
259 		}
260 	} else {
261 		blkcnt_t count = 1;
262 
263 		err = inc_valid_block_count(sbi, inode, &count, true);
264 		if (err) {
265 			f2fs_put_dnode(&dn);
266 			return err;
267 		}
268 
269 		*old_addr = dn.data_blkaddr;
270 		f2fs_truncate_data_blocks_range(&dn, 1);
271 		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
272 
273 		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
274 					ni.version, true, false);
275 	}
276 
277 	f2fs_put_dnode(&dn);
278 
279 	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
280 			index, old_addr ? *old_addr : 0, new_addr, recover);
281 	return 0;
282 }
283 
284 static void __complete_revoke_list(struct inode *inode, struct list_head *head,
285 					bool revoke)
286 {
287 	struct revoke_entry *cur, *tmp;
288 	pgoff_t start_index = 0;
289 	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);
290 
291 	list_for_each_entry_safe(cur, tmp, head, list) {
292 		if (revoke) {
293 			__replace_atomic_write_block(inode, cur->index,
294 						cur->old_addr, NULL, true);
295 		} else if (truncate) {
296 			f2fs_truncate_hole(inode, start_index, cur->index);
297 			start_index = cur->index + 1;
298 		}
299 
300 		list_del(&cur->list);
301 		kmem_cache_free(revoke_entry_slab, cur);
302 	}
303 
304 	if (!revoke && truncate)
305 		f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
306 }
307 
308 static int __f2fs_commit_atomic_write(struct inode *inode)
309 {
310 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
311 	struct f2fs_inode_info *fi = F2FS_I(inode);
312 	struct inode *cow_inode = fi->cow_inode;
313 	struct revoke_entry *new;
314 	struct list_head revoke_list;
315 	block_t blkaddr;
316 	struct dnode_of_data dn;
317 	pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
318 	pgoff_t off = 0, blen, index;
319 	int ret = 0, i;
320 
321 	INIT_LIST_HEAD(&revoke_list);
322 
323 	while (len) {
324 		blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);
325 
326 		set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
327 		ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
328 		if (ret && ret != -ENOENT) {
329 			goto out;
330 		} else if (ret == -ENOENT) {
331 			ret = 0;
332 			if (dn.max_level == 0)
333 				goto out;
334 			goto next;
335 		}
336 
337 		blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
338 				len);
339 		index = off;
340 		for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
341 			blkaddr = f2fs_data_blkaddr(&dn);
342 
343 			if (!__is_valid_data_blkaddr(blkaddr)) {
344 				continue;
345 			} else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
346 					DATA_GENERIC_ENHANCE)) {
347 				f2fs_put_dnode(&dn);
348 				ret = -EFSCORRUPTED;
349 				goto out;
350 			}
351 
352 			new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
353 							true, NULL);
354 
355 			ret = __replace_atomic_write_block(inode, index, blkaddr,
356 							&new->old_addr, false);
357 			if (ret) {
358 				f2fs_put_dnode(&dn);
359 				kmem_cache_free(revoke_entry_slab, new);
360 				goto out;
361 			}
362 
363 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
364 			new->index = index;
365 			list_add_tail(&new->list, &revoke_list);
366 		}
367 		f2fs_put_dnode(&dn);
368 next:
369 		off += blen;
370 		len -= blen;
371 	}
372 
373 out:
374 	if (ret) {
375 		sbi->revoked_atomic_block += fi->atomic_write_cnt;
376 	} else {
377 		sbi->committed_atomic_block += fi->atomic_write_cnt;
378 		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
379 		if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
380 			clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
381 			f2fs_mark_inode_dirty_sync(inode, true);
382 		}
383 	}
384 
385 	__complete_revoke_list(inode, &revoke_list, ret ? true : false);
386 
387 	return ret;
388 }
389 
390 int f2fs_commit_atomic_write(struct inode *inode)
391 {
392 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
393 	struct f2fs_inode_info *fi = F2FS_I(inode);
394 	int err;
395 
396 	err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
397 	if (err)
398 		return err;
399 
400 	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
401 	f2fs_lock_op(sbi);
402 
403 	err = __f2fs_commit_atomic_write(inode);
404 
405 	f2fs_unlock_op(sbi);
406 	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
407 
408 	return err;
409 }
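
/*
 * Note the locking order used above: i_gc_rwsem[WRITE] is taken before
 * f2fs_lock_op(), so a commit cannot race with GC migrating the blocks
 * it is about to replace, and no checkpoint can run mid-commit.
 */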
410 
411 /*
412  * This function balances dirty node and dentry pages.
413  * In addition, it controls garbage collection.
414  */
415 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
416 {
417 	if (f2fs_cp_error(sbi))
418 		return;
419 
420 	if (time_to_inject(sbi, FAULT_CHECKPOINT))
421 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);
422 
423 	/* balance_fs_bg may be left pending */
424 	if (need && excess_cached_nats(sbi))
425 		f2fs_balance_fs_bg(sbi, false);
426 
427 	if (!f2fs_is_checkpoint_ready(sbi))
428 		return;
429 
430 	/*
431 	 * If there are too many dirty dir/node pages without enough free
432 	 * segments, we should do GC, which may end with a checkpoint.
433 	 */
434 	if (has_enough_free_secs(sbi, 0, 0))
435 		return;
436 
437 	if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
438 				sbi->gc_thread->f2fs_gc_task) {
439 		DEFINE_WAIT(wait);
440 
441 		prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
442 					TASK_UNINTERRUPTIBLE);
443 		wake_up(&sbi->gc_thread->gc_wait_queue_head);
444 		io_schedule();
445 		finish_wait(&sbi->gc_thread->fggc_wq, &wait);
446 	} else {
447 		struct f2fs_gc_control gc_control = {
448 			.victim_segno = NULL_SEGNO,
449 			.init_gc_type = BG_GC,
450 			.no_bg_gc = true,
451 			.should_migrate_blocks = false,
452 			.err_gc_skipped = false,
453 			.nr_free_secs = 1 };
454 		f2fs_down_write(&sbi->gc_lock);
455 		stat_inc_gc_call_count(sbi, FOREGROUND);
456 		f2fs_gc(sbi, &gc_control);
457 	}
458 }
459 
460 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
461 {
462 	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
463 	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
464 	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
465 	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
466 	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
467 	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
468 	unsigned int threshold =
469 		SEGS_TO_BLKS(sbi, (factor * DEFAULT_DIRTY_THRESHOLD));
470 	unsigned int global_threshold = threshold * 3 / 2;
471 
472 	if (dents >= threshold || qdata >= threshold ||
473 		nodes >= threshold || meta >= threshold ||
474 		imeta >= threshold)
475 		return true;
476 	return dents + qdata + nodes + meta + imeta > global_threshold;
477 }
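
/*
 * A rough worked example (values assumed, not taken from this file):
 * with 512 blocks per segment and a DEFAULT_DIRTY_THRESHOLD of 64
 * segments, an unlocked cp_rwsem gives factor = 2, so any single page
 * type exceeding 2 * 64 * 512 = 65536 dirty pages trips the per-type
 * threshold, and 98304 pages in total trip the global one (3/2 of it).
 */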
478 
479 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
480 {
481 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
482 		return;
483 
484 	/* try to shrink the extent cache when there is not enough memory */
485 	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
486 		f2fs_shrink_read_extent_tree(sbi,
487 				READ_EXTENT_CACHE_SHRINK_NUMBER);
488 
489 	/* try to shrink the age extent cache when there is not enough memory */
490 	if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
491 		f2fs_shrink_age_extent_tree(sbi,
492 				AGE_EXTENT_CACHE_SHRINK_NUMBER);
493 
494 	/* check the # of cached NAT entries */
495 	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
496 		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
497 
498 	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
499 		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
500 	else
501 		f2fs_build_free_nids(sbi, false, false);
502 
503 	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
504 		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
505 		goto do_sync;
506 
507 	/* there is in-flight background IO, or a foreground operation ran recently */
508 	if (is_inflight_io(sbi, REQ_TIME) ||
509 		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
510 		return;
511 
512 	/* the periodic checkpoint timeout threshold has been exceeded */
513 	if (f2fs_time_over(sbi, CP_TIME))
514 		goto do_sync;
515 
516 	/* checkpoint is the only way to shrink partial cached entries */
517 	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
518 		f2fs_available_free_memory(sbi, INO_ENTRIES))
519 		return;
520 
521 do_sync:
522 	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
523 		struct blk_plug plug;
524 
525 		mutex_lock(&sbi->flush_lock);
526 
527 		blk_start_plug(&plug);
528 		f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
529 		blk_finish_plug(&plug);
530 
531 		mutex_unlock(&sbi->flush_lock);
532 	}
533 	stat_inc_cp_call_count(sbi, BACKGROUND);
534 	f2fs_sync_fs(sbi->sb, 1);
535 }
536 
537 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
538 				struct block_device *bdev)
539 {
540 	int ret = blkdev_issue_flush(bdev);
541 
542 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
543 				test_opt(sbi, FLUSH_MERGE), ret);
544 	if (!ret)
545 		f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
546 	return ret;
547 }
548 
549 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
550 {
551 	int ret = 0;
552 	int i;
553 
554 	if (!f2fs_is_multi_device(sbi))
555 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
556 
557 	for (i = 0; i < sbi->s_ndevs; i++) {
558 		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
559 			continue;
560 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
561 		if (ret)
562 			break;
563 	}
564 	return ret;
565 }
566 
567 static int issue_flush_thread(void *data)
568 {
569 	struct f2fs_sb_info *sbi = data;
570 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
571 	wait_queue_head_t *q = &fcc->flush_wait_queue;
572 repeat:
573 	if (kthread_should_stop())
574 		return 0;
575 
576 	if (!llist_empty(&fcc->issue_list)) {
577 		struct flush_cmd *cmd, *next;
578 		int ret;
579 
580 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
581 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
582 
583 		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
584 
585 		ret = submit_flush_wait(sbi, cmd->ino);
586 		atomic_inc(&fcc->issued_flush);
587 
588 		llist_for_each_entry_safe(cmd, next,
589 					  fcc->dispatch_list, llnode) {
590 			cmd->ret = ret;
591 			complete(&cmd->wait);
592 		}
593 		fcc->dispatch_list = NULL;
594 	}
595 
596 	wait_event_interruptible(*q,
597 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
598 	goto repeat;
599 }
600 
601 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
602 {
603 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
604 	struct flush_cmd cmd;
605 	int ret;
606 
607 	if (test_opt(sbi, NOBARRIER))
608 		return 0;
609 
610 	if (!test_opt(sbi, FLUSH_MERGE)) {
611 		atomic_inc(&fcc->queued_flush);
612 		ret = submit_flush_wait(sbi, ino);
613 		atomic_dec(&fcc->queued_flush);
614 		atomic_inc(&fcc->issued_flush);
615 		return ret;
616 	}
617 
618 	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
619 	    f2fs_is_multi_device(sbi)) {
620 		ret = submit_flush_wait(sbi, ino);
621 		atomic_dec(&fcc->queued_flush);
622 
623 		atomic_inc(&fcc->issued_flush);
624 		return ret;
625 	}
626 
627 	cmd.ino = ino;
628 	init_completion(&cmd.wait);
629 
630 	llist_add(&cmd.llnode, &fcc->issue_list);
631 
632 	/*
633 	 * update issue_list before we wake up the issue_flush thread; this
634 	 * smp_mb() pairs with another barrier in ___wait_event(), see
635 	 * the comments of waitqueue_active() for more details.
636 	 */
637 	smp_mb();
638 
639 	if (waitqueue_active(&fcc->flush_wait_queue))
640 		wake_up(&fcc->flush_wait_queue);
641 
642 	if (fcc->f2fs_issue_flush) {
643 		wait_for_completion(&cmd.wait);
644 		atomic_dec(&fcc->queued_flush);
645 	} else {
646 		struct llist_node *list;
647 
648 		list = llist_del_all(&fcc->issue_list);
649 		if (!list) {
650 			wait_for_completion(&cmd.wait);
651 			atomic_dec(&fcc->queued_flush);
652 		} else {
653 			struct flush_cmd *tmp, *next;
654 
655 			ret = submit_flush_wait(sbi, ino);
656 
657 			llist_for_each_entry_safe(tmp, next, list, llnode) {
658 				if (tmp == &cmd) {
659 					cmd.ret = ret;
660 					atomic_dec(&fcc->queued_flush);
661 					continue;
662 				}
663 				tmp->ret = ret;
664 				complete(&tmp->wait);
665 			}
666 		}
667 	}
668 
669 	return cmd.ret;
670 }
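
/*
 * Flush-merge in a nutshell (a summary of the paths above): the first
 * waiter, or any waiter on a multi-device fs, issues the flush itself.
 * Later waiters add a flush_cmd to fcc->issue_list and sleep on
 * cmd.wait; the issue_flush thread (or a waiter that finds the thread
 * gone and drains the list itself) submits one flush and completes
 * every queued cmd with the shared return value, so N callers cost a
 * single device flush.
 */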
671 
672 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
673 {
674 	dev_t dev = sbi->sb->s_bdev->bd_dev;
675 	struct flush_cmd_control *fcc;
676 
677 	if (SM_I(sbi)->fcc_info) {
678 		fcc = SM_I(sbi)->fcc_info;
679 		if (fcc->f2fs_issue_flush)
680 			return 0;
681 		goto init_thread;
682 	}
683 
684 	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
685 	if (!fcc)
686 		return -ENOMEM;
687 	atomic_set(&fcc->issued_flush, 0);
688 	atomic_set(&fcc->queued_flush, 0);
689 	init_waitqueue_head(&fcc->flush_wait_queue);
690 	init_llist_head(&fcc->issue_list);
691 	SM_I(sbi)->fcc_info = fcc;
692 	if (!test_opt(sbi, FLUSH_MERGE))
693 		return 0;
694 
695 init_thread:
696 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
697 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
698 	if (IS_ERR(fcc->f2fs_issue_flush)) {
699 		int err = PTR_ERR(fcc->f2fs_issue_flush);
700 
701 		fcc->f2fs_issue_flush = NULL;
702 		return err;
703 	}
704 
705 	return 0;
706 }
707 
708 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
709 {
710 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
711 
712 	if (fcc && fcc->f2fs_issue_flush) {
713 		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
714 
715 		fcc->f2fs_issue_flush = NULL;
716 		kthread_stop(flush_thread);
717 	}
718 	if (free) {
719 		kfree(fcc);
720 		SM_I(sbi)->fcc_info = NULL;
721 	}
722 }
723 
724 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
725 {
726 	int ret = 0, i;
727 
728 	if (!f2fs_is_multi_device(sbi))
729 		return 0;
730 
731 	if (test_opt(sbi, NOBARRIER))
732 		return 0;
733 
734 	for (i = 1; i < sbi->s_ndevs; i++) {
735 		int count = DEFAULT_RETRY_IO_COUNT;
736 
737 		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
738 			continue;
739 
740 		do {
741 			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
742 			if (ret)
743 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
744 		} while (ret && --count);
745 
746 		if (ret) {
747 			f2fs_stop_checkpoint(sbi, false,
748 					STOP_CP_REASON_FLUSH_FAIL);
749 			break;
750 		}
751 
752 		spin_lock(&sbi->dev_lock);
753 		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
754 		spin_unlock(&sbi->dev_lock);
755 	}
756 
757 	return ret;
758 }
759 
760 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
761 		enum dirty_type dirty_type)
762 {
763 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
764 
765 	/* need not be added */
766 	if (IS_CURSEG(sbi, segno))
767 		return;
768 
769 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
770 		dirty_i->nr_dirty[dirty_type]++;
771 
772 	if (dirty_type == DIRTY) {
773 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
774 		enum dirty_type t = sentry->type;
775 
776 		if (unlikely(t >= DIRTY)) {
777 			f2fs_bug_on(sbi, 1);
778 			return;
779 		}
780 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
781 			dirty_i->nr_dirty[t]++;
782 
783 		if (__is_large_section(sbi)) {
784 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
785 			block_t valid_blocks =
786 				get_valid_blocks(sbi, segno, true);
787 
788 			f2fs_bug_on(sbi,
789 				(!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
790 				!valid_blocks) ||
791 				valid_blocks == CAP_BLKS_PER_SEC(sbi));
792 
793 			if (!IS_CURSEC(sbi, secno))
794 				set_bit(secno, dirty_i->dirty_secmap);
795 		}
796 	}
797 }
798 
799 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
800 		enum dirty_type dirty_type)
801 {
802 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
803 	block_t valid_blocks;
804 
805 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
806 		dirty_i->nr_dirty[dirty_type]--;
807 
808 	if (dirty_type == DIRTY) {
809 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
810 		enum dirty_type t = sentry->type;
811 
812 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
813 			dirty_i->nr_dirty[t]--;
814 
815 		valid_blocks = get_valid_blocks(sbi, segno, true);
816 		if (valid_blocks == 0) {
817 			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
818 						dirty_i->victim_secmap);
819 #ifdef CONFIG_F2FS_CHECK_FS
820 			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
821 #endif
822 		}
823 		if (__is_large_section(sbi)) {
824 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
825 
826 			if (!valid_blocks ||
827 					valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
828 				clear_bit(secno, dirty_i->dirty_secmap);
829 				return;
830 			}
831 
832 			if (!IS_CURSEC(sbi, secno))
833 				set_bit(secno, dirty_i->dirty_secmap);
834 		}
835 	}
836 }
837 
838 /*
 839  * Errors such as -ENOMEM should not occur here.
 840  * Adding a dirty entry into the seglist is not a critical operation.
 841  * If a given segment is one of the current working segments, it won't be added.
842  */
843 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
844 {
845 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
846 	unsigned short valid_blocks, ckpt_valid_blocks;
847 	unsigned int usable_blocks;
848 
849 	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
850 		return;
851 
852 	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
853 	mutex_lock(&dirty_i->seglist_lock);
854 
855 	valid_blocks = get_valid_blocks(sbi, segno, false);
856 	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
857 
858 	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
859 		ckpt_valid_blocks == usable_blocks)) {
860 		__locate_dirty_segment(sbi, segno, PRE);
861 		__remove_dirty_segment(sbi, segno, DIRTY);
862 	} else if (valid_blocks < usable_blocks) {
863 		__locate_dirty_segment(sbi, segno, DIRTY);
864 	} else {
865 		/* Recovery routine with SSR needs this */
866 		__remove_dirty_segment(sbi, segno, DIRTY);
867 	}
868 
869 	mutex_unlock(&dirty_i->seglist_lock);
870 }
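
/*
 * The classification done above, in short:
 *   valid_blocks == 0 (and checkpointed state allows it)  -> PRE
 *   0 < valid_blocks < usable_blocks                      -> DIRTY
 *   valid_blocks == usable_blocks (fully valid)           -> drop DIRTY
 * The last case also serves the SSR recovery path noted above.
 */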
871 
872 /* This moves currently empty dirty segments to prefree; it takes seglist_lock itself. */
873 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
874 {
875 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
876 	unsigned int segno;
877 
878 	mutex_lock(&dirty_i->seglist_lock);
879 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
880 		if (get_valid_blocks(sbi, segno, false))
881 			continue;
882 		if (IS_CURSEG(sbi, segno))
883 			continue;
884 		__locate_dirty_segment(sbi, segno, PRE);
885 		__remove_dirty_segment(sbi, segno, DIRTY);
886 	}
887 	mutex_unlock(&dirty_i->seglist_lock);
888 }
889 
890 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
891 {
892 	int ovp_hole_segs =
893 		(overprovision_segments(sbi) - reserved_segments(sbi));
894 	block_t ovp_holes = SEGS_TO_BLKS(sbi, ovp_hole_segs);
895 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
896 	block_t holes[2] = {0, 0};	/* DATA and NODE */
897 	block_t unusable;
898 	struct seg_entry *se;
899 	unsigned int segno;
900 
901 	mutex_lock(&dirty_i->seglist_lock);
902 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
903 		se = get_seg_entry(sbi, segno);
904 		if (IS_NODESEG(se->type))
905 			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
906 							se->valid_blocks;
907 		else
908 			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
909 							se->valid_blocks;
910 	}
911 	mutex_unlock(&dirty_i->seglist_lock);
912 
913 	unusable = max(holes[DATA], holes[NODE]);
914 	if (unusable > ovp_holes)
915 		return unusable - ovp_holes;
916 	return 0;
917 }
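
/*
 * A worked example with assumed numbers: if the overprovision area
 * leaves ovp_hole_segs = 10 segments of 512 blocks (5120 blocks of
 * slack) and the dirty segments currently hold holes[DATA] = 7000 and
 * holes[NODE] = 3000 free-but-unwritable blocks, then
 * unusable = max(7000, 3000) - 5120 = 1880 blocks.
 */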
918 
919 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
920 {
921 	int ovp_hole_segs =
922 		(overprovision_segments(sbi) - reserved_segments(sbi));
923 
924 	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
925 		return 0;
926 	if (unusable > F2FS_OPTION(sbi).unusable_cap)
927 		return -EAGAIN;
928 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
929 		dirty_segments(sbi) > ovp_hole_segs)
930 		return -EAGAIN;
931 	if (has_not_enough_free_secs(sbi, 0, 0))
932 		return -EAGAIN;
933 	return 0;
934 }
935 
936 /* This is only used by SBI_CP_DISABLED */
937 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
938 {
939 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
940 	unsigned int segno = 0;
941 
942 	mutex_lock(&dirty_i->seglist_lock);
943 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
944 		if (get_valid_blocks(sbi, segno, false))
945 			continue;
946 		if (get_ckpt_valid_blocks(sbi, segno, false))
947 			continue;
948 		mutex_unlock(&dirty_i->seglist_lock);
949 		return segno;
950 	}
951 	mutex_unlock(&dirty_i->seglist_lock);
952 	return NULL_SEGNO;
953 }
954 
955 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
956 		struct block_device *bdev, block_t lstart,
957 		block_t start, block_t len)
958 {
959 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
960 	struct list_head *pend_list;
961 	struct discard_cmd *dc;
962 
963 	f2fs_bug_on(sbi, !len);
964 
965 	pend_list = &dcc->pend_list[plist_idx(len)];
966 
967 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
968 	INIT_LIST_HEAD(&dc->list);
969 	dc->bdev = bdev;
970 	dc->di.lstart = lstart;
971 	dc->di.start = start;
972 	dc->di.len = len;
973 	dc->ref = 0;
974 	dc->state = D_PREP;
975 	dc->queued = 0;
976 	dc->error = 0;
977 	init_completion(&dc->wait);
978 	list_add_tail(&dc->list, pend_list);
979 	spin_lock_init(&dc->lock);
980 	dc->bio_ref = 0;
981 	atomic_inc(&dcc->discard_cmd_cnt);
982 	dcc->undiscard_blks += len;
983 
984 	return dc;
985 }
986 
987 static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
988 {
989 #ifdef CONFIG_F2FS_CHECK_FS
990 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
991 	struct rb_node *cur = rb_first_cached(&dcc->root), *next;
992 	struct discard_cmd *cur_dc, *next_dc;
993 
994 	while (cur) {
995 		next = rb_next(cur);
996 		if (!next)
997 			return true;
998 
999 		cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
1000 		next_dc = rb_entry(next, struct discard_cmd, rb_node);
1001 
1002 		if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
1003 			f2fs_info(sbi, "broken discard_rbtree, "
1004 				"cur(%u, %u) next(%u, %u)",
1005 				cur_dc->di.lstart, cur_dc->di.len,
1006 				next_dc->di.lstart, next_dc->di.len);
1007 			return false;
1008 		}
1009 		cur = next;
1010 	}
1011 #endif
1012 	return true;
1013 }
1014 
1015 static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
1016 						block_t blkaddr)
1017 {
1018 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1019 	struct rb_node *node = dcc->root.rb_root.rb_node;
1020 	struct discard_cmd *dc;
1021 
1022 	while (node) {
1023 		dc = rb_entry(node, struct discard_cmd, rb_node);
1024 
1025 		if (blkaddr < dc->di.lstart)
1026 			node = node->rb_left;
1027 		else if (blkaddr >= dc->di.lstart + dc->di.len)
1028 			node = node->rb_right;
1029 		else
1030 			return dc;
1031 	}
1032 	return NULL;
1033 }
1034 
1035 static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
1036 				block_t blkaddr,
1037 				struct discard_cmd **prev_entry,
1038 				struct discard_cmd **next_entry,
1039 				struct rb_node ***insert_p,
1040 				struct rb_node **insert_parent)
1041 {
1042 	struct rb_node **pnode = &root->rb_root.rb_node;
1043 	struct rb_node *parent = NULL, *tmp_node;
1044 	struct discard_cmd *dc;
1045 
1046 	*insert_p = NULL;
1047 	*insert_parent = NULL;
1048 	*prev_entry = NULL;
1049 	*next_entry = NULL;
1050 
1051 	if (RB_EMPTY_ROOT(&root->rb_root))
1052 		return NULL;
1053 
1054 	while (*pnode) {
1055 		parent = *pnode;
1056 		dc = rb_entry(*pnode, struct discard_cmd, rb_node);
1057 
1058 		if (blkaddr < dc->di.lstart)
1059 			pnode = &(*pnode)->rb_left;
1060 		else if (blkaddr >= dc->di.lstart + dc->di.len)
1061 			pnode = &(*pnode)->rb_right;
1062 		else
1063 			goto lookup_neighbors;
1064 	}
1065 
1066 	*insert_p = pnode;
1067 	*insert_parent = parent;
1068 
1069 	dc = rb_entry(parent, struct discard_cmd, rb_node);
1070 	tmp_node = parent;
1071 	if (parent && blkaddr > dc->di.lstart)
1072 		tmp_node = rb_next(parent);
1073 	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1074 
1075 	tmp_node = parent;
1076 	if (parent && blkaddr < dc->di.lstart)
1077 		tmp_node = rb_prev(parent);
1078 	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1079 	return NULL;
1080 
1081 lookup_neighbors:
1082 	/* lookup prev node for merging backward later */
1083 	tmp_node = rb_prev(&dc->rb_node);
1084 	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1085 
1086 	/* lookup next node for merging frontward later */
1087 	tmp_node = rb_next(&dc->rb_node);
1088 	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1089 	return dc;
1090 }
1091 
1092 static void __detach_discard_cmd(struct discard_cmd_control *dcc,
1093 							struct discard_cmd *dc)
1094 {
1095 	if (dc->state == D_DONE)
1096 		atomic_sub(dc->queued, &dcc->queued_discard);
1097 
1098 	list_del(&dc->list);
1099 	rb_erase_cached(&dc->rb_node, &dcc->root);
1100 	dcc->undiscard_blks -= dc->di.len;
1101 
1102 	kmem_cache_free(discard_cmd_slab, dc);
1103 
1104 	atomic_dec(&dcc->discard_cmd_cnt);
1105 }
1106 
1107 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1108 							struct discard_cmd *dc)
1109 {
1110 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1111 	unsigned long flags;
1112 
1113 	trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);
1114 
1115 	spin_lock_irqsave(&dc->lock, flags);
1116 	if (dc->bio_ref) {
1117 		spin_unlock_irqrestore(&dc->lock, flags);
1118 		return;
1119 	}
1120 	spin_unlock_irqrestore(&dc->lock, flags);
1121 
1122 	f2fs_bug_on(sbi, dc->ref);
1123 
1124 	if (dc->error == -EOPNOTSUPP)
1125 		dc->error = 0;
1126 
1127 	if (dc->error)
1128 		f2fs_info_ratelimited(sbi,
1129 			"Issue discard(%u, %u, %u) failed, ret: %d",
1130 			dc->di.lstart, dc->di.start, dc->di.len, dc->error);
1131 	__detach_discard_cmd(dcc, dc);
1132 }
1133 
1134 static void f2fs_submit_discard_endio(struct bio *bio)
1135 {
1136 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
1137 	unsigned long flags;
1138 
1139 	spin_lock_irqsave(&dc->lock, flags);
1140 	if (!dc->error)
1141 		dc->error = blk_status_to_errno(bio->bi_status);
1142 	dc->bio_ref--;
1143 	if (!dc->bio_ref && dc->state == D_SUBMIT) {
1144 		dc->state = D_DONE;
1145 		complete_all(&dc->wait);
1146 	}
1147 	spin_unlock_irqrestore(&dc->lock, flags);
1148 	bio_put(bio);
1149 }
1150 
1151 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1152 				block_t start, block_t end)
1153 {
1154 #ifdef CONFIG_F2FS_CHECK_FS
1155 	struct seg_entry *sentry;
1156 	unsigned int segno;
1157 	block_t blk = start;
1158 	unsigned long offset, size, *map;
1159 
1160 	while (blk < end) {
1161 		segno = GET_SEGNO(sbi, blk);
1162 		sentry = get_seg_entry(sbi, segno);
1163 		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1164 
1165 		if (end < START_BLOCK(sbi, segno + 1))
1166 			size = GET_BLKOFF_FROM_SEG0(sbi, end);
1167 		else
1168 			size = BLKS_PER_SEG(sbi);
1169 		map = (unsigned long *)(sentry->cur_valid_map);
1170 		offset = __find_rev_next_bit(map, size, offset);
1171 		f2fs_bug_on(sbi, offset != size);
1172 		blk = START_BLOCK(sbi, segno + 1);
1173 	}
1174 #endif
1175 }
1176 
1177 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1178 				struct discard_policy *dpolicy,
1179 				int discard_type, unsigned int granularity)
1180 {
1181 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1182 
1183 	/* common policy */
1184 	dpolicy->type = discard_type;
1185 	dpolicy->sync = true;
1186 	dpolicy->ordered = false;
1187 	dpolicy->granularity = granularity;
1188 
1189 	dpolicy->max_requests = dcc->max_discard_request;
1190 	dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
1191 	dpolicy->timeout = false;
1192 
1193 	if (discard_type == DPOLICY_BG) {
1194 		dpolicy->min_interval = dcc->min_discard_issue_time;
1195 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1196 		dpolicy->max_interval = dcc->max_discard_issue_time;
1197 		if (dcc->discard_io_aware == DPOLICY_IO_AWARE_ENABLE)
1198 			dpolicy->io_aware = true;
1199 		else if (dcc->discard_io_aware == DPOLICY_IO_AWARE_DISABLE)
1200 			dpolicy->io_aware = false;
1201 		dpolicy->sync = false;
1202 		dpolicy->ordered = true;
1203 		if (utilization(sbi) > dcc->discard_urgent_util) {
1204 			dpolicy->granularity = MIN_DISCARD_GRANULARITY;
1205 			if (atomic_read(&dcc->discard_cmd_cnt))
1206 				dpolicy->max_interval =
1207 					dcc->min_discard_issue_time;
1208 		}
1209 	} else if (discard_type == DPOLICY_FORCE) {
1210 		dpolicy->min_interval = dcc->min_discard_issue_time;
1211 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1212 		dpolicy->max_interval = dcc->max_discard_issue_time;
1213 		dpolicy->io_aware = false;
1214 	} else if (discard_type == DPOLICY_FSTRIM) {
1215 		dpolicy->io_aware = false;
1216 	} else if (discard_type == DPOLICY_UMOUNT) {
1217 		dpolicy->io_aware = false;
1218 		/* we need to issue all to keep CP_TRIMMED_FLAG */
1219 		dpolicy->granularity = MIN_DISCARD_GRANULARITY;
1220 		dpolicy->timeout = true;
1221 	}
1222 }
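
/*
 * Summary of the per-type tweaks made above:
 *   DPOLICY_BG:     async + ordered; io_aware follows the dcc knob, and
 *                   the policy falls back to MIN_DISCARD_GRANULARITY
 *                   when utilization passes dcc->discard_urgent_util.
 *   DPOLICY_FORCE:  same issue intervals as BG but never io_aware.
 *   DPOLICY_FSTRIM: synchronous, never io_aware.
 *   DPOLICY_UMOUNT: never io_aware, finest granularity plus a timeout,
 *                   so everything is issued to keep CP_TRIMMED_FLAG.
 */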
1223 
1224 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1225 				struct block_device *bdev, block_t lstart,
1226 				block_t start, block_t len);
1227 
1228 #ifdef CONFIG_BLK_DEV_ZONED
1229 static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
1230 				   struct discard_cmd *dc, blk_opf_t flag,
1231 				   struct list_head *wait_list,
1232 				   unsigned int *issued)
1233 {
1234 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1235 	struct block_device *bdev = dc->bdev;
1236 	struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
1237 	unsigned long flags;
1238 
1239 	trace_f2fs_issue_reset_zone(bdev, dc->di.start);
1240 
1241 	spin_lock_irqsave(&dc->lock, flags);
1242 	dc->state = D_SUBMIT;
1243 	dc->bio_ref++;
1244 	spin_unlock_irqrestore(&dc->lock, flags);
1245 
1246 	if (issued)
1247 		(*issued)++;
1248 
1249 	atomic_inc(&dcc->queued_discard);
1250 	dc->queued++;
1251 	list_move_tail(&dc->list, wait_list);
1252 
1253 	/* sanity check on discard range */
1254 	__check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);
1255 
1256 	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
1257 	bio->bi_private = dc;
1258 	bio->bi_end_io = f2fs_submit_discard_endio;
1259 	submit_bio(bio);
1260 
1261 	atomic_inc(&dcc->issued_discard);
1262 	f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
1263 }
1264 #endif
1265 
1266 /* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
1267 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1268 				struct discard_policy *dpolicy,
1269 				struct discard_cmd *dc, int *issued)
1270 {
1271 	struct block_device *bdev = dc->bdev;
1272 	unsigned int max_discard_blocks =
1273 			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1274 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1275 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1276 					&(dcc->fstrim_list) : &(dcc->wait_list);
1277 	blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
1278 	block_t lstart, start, len, total_len;
1279 	int err = 0;
1280 
1281 	if (dc->state != D_PREP)
1282 		return 0;
1283 
1284 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1285 		return 0;
1286 
1287 #ifdef CONFIG_BLK_DEV_ZONED
1288 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
1289 		int devi = f2fs_bdev_index(sbi, bdev);
1290 
1291 		if (devi < 0)
1292 			return -EINVAL;
1293 
1294 		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1295 			__submit_zone_reset_cmd(sbi, dc, flag,
1296 						wait_list, issued);
1297 			return 0;
1298 		}
1299 	}
1300 #endif
1301 
1302 	/*
1303 	 * stop issuing discard in any of the cases below:
1304 	 * 1. the device is a conventional zone, but doesn't support discard.
1305 	 * 2. the device is a regular device, but after a snapshot it doesn't
1306 	 * support discard.
1307 	 */
1308 	if (!bdev_max_discard_sectors(bdev))
1309 		return -EOPNOTSUPP;
1310 
1311 	trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);
1312 
1313 	lstart = dc->di.lstart;
1314 	start = dc->di.start;
1315 	len = dc->di.len;
1316 	total_len = len;
1317 
1318 	dc->di.len = 0;
1319 
1320 	while (total_len && *issued < dpolicy->max_requests && !err) {
1321 		struct bio *bio = NULL;
1322 		unsigned long flags;
1323 		bool last = true;
1324 
1325 		if (len > max_discard_blocks) {
1326 			len = max_discard_blocks;
1327 			last = false;
1328 		}
1329 
1330 		(*issued)++;
1331 		if (*issued == dpolicy->max_requests)
1332 			last = true;
1333 
1334 		dc->di.len += len;
1335 
1336 		if (time_to_inject(sbi, FAULT_DISCARD)) {
1337 			err = -EIO;
1338 		} else {
1339 			err = __blkdev_issue_discard(bdev,
1340 					SECTOR_FROM_BLOCK(start),
1341 					SECTOR_FROM_BLOCK(len),
1342 					GFP_NOFS, &bio);
1343 		}
1344 		if (err) {
1345 			spin_lock_irqsave(&dc->lock, flags);
1346 			if (dc->state == D_PARTIAL)
1347 				dc->state = D_SUBMIT;
1348 			spin_unlock_irqrestore(&dc->lock, flags);
1349 
1350 			break;
1351 		}
1352 
1353 		f2fs_bug_on(sbi, !bio);
1354 
1355 		/*
1356 		 * the state must be set before submission to avoid hitting
1357 		 * D_DONE right away
1358 		 */
1359 		spin_lock_irqsave(&dc->lock, flags);
1360 		if (last)
1361 			dc->state = D_SUBMIT;
1362 		else
1363 			dc->state = D_PARTIAL;
1364 		dc->bio_ref++;
1365 		spin_unlock_irqrestore(&dc->lock, flags);
1366 
1367 		atomic_inc(&dcc->queued_discard);
1368 		dc->queued++;
1369 		list_move_tail(&dc->list, wait_list);
1370 
1371 		/* sanity check on discard range */
1372 		__check_sit_bitmap(sbi, lstart, lstart + len);
1373 
1374 		bio->bi_private = dc;
1375 		bio->bi_end_io = f2fs_submit_discard_endio;
1376 		bio->bi_opf |= flag;
1377 		submit_bio(bio);
1378 
1379 		atomic_inc(&dcc->issued_discard);
1380 
1381 		f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);
1382 
1383 		lstart += len;
1384 		start += len;
1385 		total_len -= len;
1386 		len = total_len;
1387 	}
1388 
1389 	if (!err && len) {
1390 		dcc->undiscard_blks -= len;
1391 		__update_discard_tree_range(sbi, bdev, lstart, start, len);
1392 	}
1393 	return err;
1394 }
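
/*
 * Splitting example (numbers assumed): for a 600-block command on a
 * device whose bdev_max_discard_sectors() maps to 256 blocks, the loop
 * above issues bios of 256, 256 and 88 blocks. The first two leave the
 * command in D_PARTIAL; the final one (last == true) moves it to
 * D_SUBMIT, and f2fs_submit_discard_endio() flips it to D_DONE once
 * bio_ref drains -- assuming dpolicy->max_requests is not hit first.
 */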
1395 
1396 static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
1397 				struct block_device *bdev, block_t lstart,
1398 				block_t start, block_t len)
1399 {
1400 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1401 	struct rb_node **p = &dcc->root.rb_root.rb_node;
1402 	struct rb_node *parent = NULL;
1403 	struct discard_cmd *dc;
1404 	bool leftmost = true;
1405 
1406 	/* look up rb tree to find parent node */
1407 	while (*p) {
1408 		parent = *p;
1409 		dc = rb_entry(parent, struct discard_cmd, rb_node);
1410 
1411 		if (lstart < dc->di.lstart) {
1412 			p = &(*p)->rb_left;
1413 		} else if (lstart >= dc->di.lstart + dc->di.len) {
1414 			p = &(*p)->rb_right;
1415 			leftmost = false;
1416 		} else {
1417 			/* skip adding if the range already exists */
1418 			return;
1419 		}
1420 	}
1421 
1422 	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1423 
1424 	rb_link_node(&dc->rb_node, parent, p);
1425 	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
1426 }
1427 
1428 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1429 						struct discard_cmd *dc)
1430 {
1431 	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
1432 }
1433 
1434 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1435 				struct discard_cmd *dc, block_t blkaddr)
1436 {
1437 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1438 	struct discard_info di = dc->di;
1439 	bool modified = false;
1440 
1441 	if (dc->state == D_DONE || dc->di.len == 1) {
1442 		__remove_discard_cmd(sbi, dc);
1443 		return;
1444 	}
1445 
1446 	dcc->undiscard_blks -= di.len;
1447 
1448 	if (blkaddr > di.lstart) {
1449 		dc->di.len = blkaddr - dc->di.lstart;
1450 		dcc->undiscard_blks += dc->di.len;
1451 		__relocate_discard_cmd(dcc, dc);
1452 		modified = true;
1453 	}
1454 
1455 	if (blkaddr < di.lstart + di.len - 1) {
1456 		if (modified) {
1457 			__insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
1458 					di.start + blkaddr + 1 - di.lstart,
1459 					di.lstart + di.len - 1 - blkaddr);
1460 		} else {
1461 			dc->di.lstart++;
1462 			dc->di.len--;
1463 			dc->di.start++;
1464 			dcc->undiscard_blks += dc->di.len;
1465 			__relocate_discard_cmd(dcc, dc);
1466 		}
1467 	}
1468 }
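
/*
 * Punch example: take a D_PREP command covering lstart 100..109
 * (len 10) and punch blkaddr 104. The front piece keeps the command
 * with len = 104 - 100 = 4, and a new command is inserted for
 * lstart 105 with len = 100 + 10 - 1 - 104 = 5, so only block 104 is
 * dropped. Punching the first block instead just advances lstart/start
 * and shrinks len by one.
 */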
1469 
1470 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1471 				struct block_device *bdev, block_t lstart,
1472 				block_t start, block_t len)
1473 {
1474 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1475 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1476 	struct discard_cmd *dc;
1477 	struct discard_info di = {0};
1478 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1479 	unsigned int max_discard_blocks =
1480 			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1481 	block_t end = lstart + len;
1482 
1483 	dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
1484 				&prev_dc, &next_dc, &insert_p, &insert_parent);
1485 	if (dc)
1486 		prev_dc = dc;
1487 
1488 	if (!prev_dc) {
1489 		di.lstart = lstart;
1490 		di.len = next_dc ? next_dc->di.lstart - lstart : len;
1491 		di.len = min(di.len, len);
1492 		di.start = start;
1493 	}
1494 
1495 	while (1) {
1496 		struct rb_node *node;
1497 		bool merged = false;
1498 		struct discard_cmd *tdc = NULL;
1499 
1500 		if (prev_dc) {
1501 			di.lstart = prev_dc->di.lstart + prev_dc->di.len;
1502 			if (di.lstart < lstart)
1503 				di.lstart = lstart;
1504 			if (di.lstart >= end)
1505 				break;
1506 
1507 			if (!next_dc || next_dc->di.lstart > end)
1508 				di.len = end - di.lstart;
1509 			else
1510 				di.len = next_dc->di.lstart - di.lstart;
1511 			di.start = start + di.lstart - lstart;
1512 		}
1513 
1514 		if (!di.len)
1515 			goto next;
1516 
1517 		if (prev_dc && prev_dc->state == D_PREP &&
1518 			prev_dc->bdev == bdev &&
1519 			__is_discard_back_mergeable(&di, &prev_dc->di,
1520 							max_discard_blocks)) {
1521 			prev_dc->di.len += di.len;
1522 			dcc->undiscard_blks += di.len;
1523 			__relocate_discard_cmd(dcc, prev_dc);
1524 			di = prev_dc->di;
1525 			tdc = prev_dc;
1526 			merged = true;
1527 		}
1528 
1529 		if (next_dc && next_dc->state == D_PREP &&
1530 			next_dc->bdev == bdev &&
1531 			__is_discard_front_mergeable(&di, &next_dc->di,
1532 							max_discard_blocks)) {
1533 			next_dc->di.lstart = di.lstart;
1534 			next_dc->di.len += di.len;
1535 			next_dc->di.start = di.start;
1536 			dcc->undiscard_blks += di.len;
1537 			__relocate_discard_cmd(dcc, next_dc);
1538 			if (tdc)
1539 				__remove_discard_cmd(sbi, tdc);
1540 			merged = true;
1541 		}
1542 
1543 		if (!merged)
1544 			__insert_discard_cmd(sbi, bdev,
1545 						di.lstart, di.start, di.len);
1546  next:
1547 		prev_dc = next_dc;
1548 		if (!prev_dc)
1549 			break;
1550 
1551 		node = rb_next(&prev_dc->rb_node);
1552 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1553 	}
1554 }
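
/*
 * Merge example: with a D_PREP command [100, 150) on the same bdev,
 * queueing [150, 180) extends it in place (back merge); if another
 * D_PREP command starts at 180, the front-merge step then folds the
 * two into one [100, 200) command and removes the leftover via tdc.
 * Both merges are bounded by max_discard_blocks, as checked by the
 * __is_discard_*_mergeable() helpers.
 */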
1555 
1556 #ifdef CONFIG_BLK_DEV_ZONED
1557 static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
1558 		struct block_device *bdev, block_t blkstart, block_t lblkstart,
1559 		block_t blklen)
1560 {
1561 	trace_f2fs_queue_reset_zone(bdev, blkstart);
1562 
1563 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1564 	__insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
1565 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1566 }
1567 #endif
1568 
1569 static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
1570 		struct block_device *bdev, block_t blkstart, block_t blklen)
1571 {
1572 	block_t lblkstart = blkstart;
1573 
1574 	if (!f2fs_bdev_support_discard(bdev))
1575 		return;
1576 
1577 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
1578 
1579 	if (f2fs_is_multi_device(sbi)) {
1580 		int devi = f2fs_target_device_index(sbi, blkstart);
1581 
1582 		blkstart -= FDEV(devi).start_blk;
1583 	}
1584 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1585 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1586 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1587 }
1588 
1589 static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1590 		struct discard_policy *dpolicy, int *issued)
1591 {
1592 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1593 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1594 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1595 	struct discard_cmd *dc;
1596 	struct blk_plug plug;
1597 	bool io_interrupted = false;
1598 
1599 	mutex_lock(&dcc->cmd_lock);
1600 	dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
1601 				&prev_dc, &next_dc, &insert_p, &insert_parent);
1602 	if (!dc)
1603 		dc = next_dc;
1604 
1605 	blk_start_plug(&plug);
1606 
1607 	while (dc) {
1608 		struct rb_node *node;
1609 		int err = 0;
1610 
1611 		if (dc->state != D_PREP)
1612 			goto next;
1613 
1614 		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1615 			io_interrupted = true;
1616 			break;
1617 		}
1618 
1619 		dcc->next_pos = dc->di.lstart + dc->di.len;
1620 		err = __submit_discard_cmd(sbi, dpolicy, dc, issued);
1621 
1622 		if (*issued >= dpolicy->max_requests)
1623 			break;
1624 next:
1625 		node = rb_next(&dc->rb_node);
1626 		if (err)
1627 			__remove_discard_cmd(sbi, dc);
1628 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1629 	}
1630 
1631 	blk_finish_plug(&plug);
1632 
1633 	if (!dc)
1634 		dcc->next_pos = 0;
1635 
1636 	mutex_unlock(&dcc->cmd_lock);
1637 
1638 	if (!(*issued) && io_interrupted)
1639 		*issued = -1;
1640 }
1641 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1642 					struct discard_policy *dpolicy);
1643 
1644 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1645 					struct discard_policy *dpolicy)
1646 {
1647 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1648 	struct list_head *pend_list;
1649 	struct discard_cmd *dc, *tmp;
1650 	struct blk_plug plug;
1651 	int i, issued;
1652 	bool io_interrupted = false;
1653 
1654 	if (dpolicy->timeout)
1655 		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1656 
1657 retry:
1658 	issued = 0;
1659 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1660 		if (dpolicy->timeout &&
1661 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1662 			break;
1663 
1664 		if (i + 1 < dpolicy->granularity)
1665 			break;
1666 
1667 		if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) {
1668 			__issue_discard_cmd_orderly(sbi, dpolicy, &issued);
1669 			return issued;
1670 		}
1671 
1672 		pend_list = &dcc->pend_list[i];
1673 
1674 		mutex_lock(&dcc->cmd_lock);
1675 		if (list_empty(pend_list))
1676 			goto next;
1677 		if (unlikely(dcc->rbtree_check))
1678 			f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
1679 		blk_start_plug(&plug);
1680 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1681 			f2fs_bug_on(sbi, dc->state != D_PREP);
1682 
1683 			if (dpolicy->timeout &&
1684 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1685 				break;
1686 
1687 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1688 						!is_idle(sbi, DISCARD_TIME)) {
1689 				io_interrupted = true;
1690 				break;
1691 			}
1692 
1693 			__submit_discard_cmd(sbi, dpolicy, dc, &issued);
1694 
1695 			if (issued >= dpolicy->max_requests)
1696 				break;
1697 		}
1698 		blk_finish_plug(&plug);
1699 next:
1700 		mutex_unlock(&dcc->cmd_lock);
1701 
1702 		if (issued >= dpolicy->max_requests || io_interrupted)
1703 			break;
1704 	}
1705 
1706 	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1707 		__wait_all_discard_cmd(sbi, dpolicy);
1708 		goto retry;
1709 	}
1710 
1711 	if (!issued && io_interrupted)
1712 		issued = -1;
1713 
1714 	return issued;
1715 }
1716 
1717 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1718 {
1719 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1720 	struct list_head *pend_list;
1721 	struct discard_cmd *dc, *tmp;
1722 	int i;
1723 	bool dropped = false;
1724 
1725 	mutex_lock(&dcc->cmd_lock);
1726 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1727 		pend_list = &dcc->pend_list[i];
1728 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1729 			f2fs_bug_on(sbi, dc->state != D_PREP);
1730 			__remove_discard_cmd(sbi, dc);
1731 			dropped = true;
1732 		}
1733 	}
1734 	mutex_unlock(&dcc->cmd_lock);
1735 
1736 	return dropped;
1737 }
1738 
1739 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1740 {
1741 	__drop_discard_cmd(sbi);
1742 }
1743 
1744 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1745 							struct discard_cmd *dc)
1746 {
1747 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1748 	unsigned int len = 0;
1749 
1750 	wait_for_completion_io(&dc->wait);
1751 	mutex_lock(&dcc->cmd_lock);
1752 	f2fs_bug_on(sbi, dc->state != D_DONE);
1753 	dc->ref--;
1754 	if (!dc->ref) {
1755 		if (!dc->error)
1756 			len = dc->di.len;
1757 		__remove_discard_cmd(sbi, dc);
1758 	}
1759 	mutex_unlock(&dcc->cmd_lock);
1760 
1761 	return len;
1762 }
1763 
1764 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1765 						struct discard_policy *dpolicy,
1766 						block_t start, block_t end)
1767 {
1768 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1769 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1770 					&(dcc->fstrim_list) : &(dcc->wait_list);
1771 	struct discard_cmd *dc = NULL, *iter, *tmp;
1772 	unsigned int trimmed = 0;
1773 
1774 next:
1775 	dc = NULL;
1776 
1777 	mutex_lock(&dcc->cmd_lock);
1778 	list_for_each_entry_safe(iter, tmp, wait_list, list) {
1779 		if (iter->di.lstart + iter->di.len <= start ||
1780 					end <= iter->di.lstart)
1781 			continue;
1782 		if (iter->di.len < dpolicy->granularity)
1783 			continue;
1784 		if (iter->state == D_DONE && !iter->ref) {
1785 			wait_for_completion_io(&iter->wait);
1786 			if (!iter->error)
1787 				trimmed += iter->di.len;
1788 			__remove_discard_cmd(sbi, iter);
1789 		} else {
1790 			iter->ref++;
1791 			dc = iter;
1792 			break;
1793 		}
1794 	}
1795 	mutex_unlock(&dcc->cmd_lock);
1796 
1797 	if (dc) {
1798 		trimmed += __wait_one_discard_bio(sbi, dc);
1799 		goto next;
1800 	}
1801 
1802 	return trimmed;
1803 }
1804 
1805 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1806 						struct discard_policy *dpolicy)
1807 {
1808 	struct discard_policy dp;
1809 	unsigned int discard_blks;
1810 
1811 	if (dpolicy)
1812 		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1813 
1814 	/* wait all */
1815 	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
1816 	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1817 	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
1818 	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1819 
1820 	return discard_blks;
1821 }
1822 
1823 /* This should be covered by global mutex, &sit_i->sentry_lock */
1824 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1825 {
1826 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1827 	struct discard_cmd *dc;
1828 	bool need_wait = false;
1829 
1830 	mutex_lock(&dcc->cmd_lock);
1831 	dc = __lookup_discard_cmd(sbi, blkaddr);
1832 #ifdef CONFIG_BLK_DEV_ZONED
1833 	if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
1834 		int devi = f2fs_bdev_index(sbi, dc->bdev);
1835 
1836 		if (devi < 0) {
1837 			mutex_unlock(&dcc->cmd_lock);
1838 			return;
1839 		}
1840 
1841 		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1842 			/* force submit zone reset */
1843 			if (dc->state == D_PREP)
1844 				__submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
1845 							&dcc->wait_list, NULL);
1846 			dc->ref++;
1847 			mutex_unlock(&dcc->cmd_lock);
1848 			/* wait for zone reset */
1849 			__wait_one_discard_bio(sbi, dc);
1850 			return;
1851 		}
1852 	}
1853 #endif
1854 	if (dc) {
1855 		if (dc->state == D_PREP) {
1856 			__punch_discard_cmd(sbi, dc, blkaddr);
1857 		} else {
1858 			dc->ref++;
1859 			need_wait = true;
1860 		}
1861 	}
1862 	mutex_unlock(&dcc->cmd_lock);
1863 
1864 	if (need_wait)
1865 		__wait_one_discard_bio(sbi, dc);
1866 }
1867 
1868 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1869 {
1870 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1871 
1872 	if (dcc && dcc->f2fs_issue_discard) {
1873 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1874 
1875 		dcc->f2fs_issue_discard = NULL;
1876 		kthread_stop(discard_thread);
1877 	}
1878 }
1879 
1880 /**
1881  * f2fs_issue_discard_timeout() - Issue all discard commands within UMOUNT_DISCARD_TIMEOUT
1882  * @sbi: the f2fs_sb_info data for discard cmd to issue
1883  *
1884  * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands will be dropped.
1885  *
1886  * Return: true if all discard commands were issued or none needed to be issued, otherwise false.
1887  */
1888 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1889 {
1890 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1891 	struct discard_policy dpolicy;
1892 	bool dropped;
1893 
1894 	if (!atomic_read(&dcc->discard_cmd_cnt))
1895 		return true;
1896 
1897 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1898 					dcc->discard_granularity);
1899 	__issue_discard_cmd(sbi, &dpolicy);
1900 	dropped = __drop_discard_cmd(sbi);
1901 
1902 	/* just to make sure there are no pending discard commands */
1903 	__wait_all_discard_cmd(sbi, NULL);
1904 
1905 	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1906 	return !dropped;
1907 }
1908 
1909 static int issue_discard_thread(void *data)
1910 {
1911 	struct f2fs_sb_info *sbi = data;
1912 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1913 	wait_queue_head_t *q = &dcc->discard_wait_queue;
1914 	struct discard_policy dpolicy;
1915 	unsigned int wait_ms = dcc->min_discard_issue_time;
1916 	int issued;
1917 
1918 	set_freezable();
1919 
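	/*
	 * Each pass picks a policy: DPOLICY_FORCE under urgent GC or memory
	 * pressure, DPOLICY_BG otherwise, then issues commands and adapts
	 * the sleep interval to how much work was actually done.
	 */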
1920 	do {
1921 		wait_event_freezable_timeout(*q,
1922 				kthread_should_stop() || dcc->discard_wake,
1923 				msecs_to_jiffies(wait_ms));
1924 
1925 		if (sbi->gc_mode == GC_URGENT_HIGH ||
1926 			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
1927 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
1928 						MIN_DISCARD_GRANULARITY);
1929 		else
1930 			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1931 						dcc->discard_granularity);
1932 
1933 		if (dcc->discard_wake)
1934 			dcc->discard_wake = false;
1935 
1936 		/* clean up pending candidates before going to sleep */
1937 		if (atomic_read(&dcc->queued_discard))
1938 			__wait_all_discard_cmd(sbi, NULL);
1939 
1940 		if (f2fs_readonly(sbi->sb))
1941 			continue;
1942 		if (kthread_should_stop())
1943 			return 0;
1944 		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1945 			!atomic_read(&dcc->discard_cmd_cnt)) {
1946 			wait_ms = dpolicy.max_interval;
1947 			continue;
1948 		}
1949 
1950 		sb_start_intwrite(sbi->sb);
1951 
1952 		issued = __issue_discard_cmd(sbi, &dpolicy);
1953 		if (issued > 0) {
1954 			__wait_all_discard_cmd(sbi, &dpolicy);
1955 			wait_ms = dpolicy.min_interval;
1956 		} else if (issued == -1) {
1957 			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1958 			if (!wait_ms)
1959 				wait_ms = dpolicy.mid_interval;
1960 		} else {
1961 			wait_ms = dpolicy.max_interval;
1962 		}
1963 		if (!atomic_read(&dcc->discard_cmd_cnt))
1964 			wait_ms = dpolicy.max_interval;
1965 
1966 		sb_end_intwrite(sbi->sb);
1967 
1968 	} while (!kthread_should_stop());
1969 	return 0;
1970 }
1971 
1972 #ifdef CONFIG_BLK_DEV_ZONED
1973 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1974 		struct block_device *bdev, block_t blkstart, block_t blklen)
1975 {
1976 	sector_t sector, nr_sects;
1977 	block_t lblkstart = blkstart;
1978 	int devi = 0;
1979 	u64 remainder = 0;
1980 
1981 	if (f2fs_is_multi_device(sbi)) {
1982 		devi = f2fs_target_device_index(sbi, blkstart);
1983 		if (blkstart < FDEV(devi).start_blk ||
1984 		    blkstart > FDEV(devi).end_blk) {
1985 			f2fs_err(sbi, "Invalid block %x", blkstart);
1986 			return -EIO;
1987 		}
1988 		blkstart -= FDEV(devi).start_blk;
1989 	}
1990 
1991 	/* For sequential zones, reset the zone write pointer */
1992 	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1993 		sector = SECTOR_FROM_BLOCK(blkstart);
1994 		nr_sects = SECTOR_FROM_BLOCK(blklen);
1995 		div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);
1996 
1997 		if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
1998 			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1999 				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
2000 				 blkstart, blklen);
2001 			return -EIO;
2002 		}
2003 
2004 		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
2005 			unsigned int nofs_flags;
2006 			int ret;
2007 
2008 			trace_f2fs_issue_reset_zone(bdev, blkstart);
2009 			nofs_flags = memalloc_nofs_save();
2010 			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
2011 						sector, nr_sects);
2012 			memalloc_nofs_restore(nofs_flags);
2013 			return ret;
2014 		}
2015 
2016 		__queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
2017 		return 0;
2018 	}
2019 
2020 	/* For conventional zones, use regular discard if supported */
2021 	__queue_discard_cmd(sbi, bdev, lblkstart, blklen);
2022 	return 0;
2023 }
2024 #endif
2025 
2026 static int __issue_discard_async(struct f2fs_sb_info *sbi,
2027 		struct block_device *bdev, block_t blkstart, block_t blklen)
2028 {
2029 #ifdef CONFIG_BLK_DEV_ZONED
2030 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
2031 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
2032 #endif
2033 	__queue_discard_cmd(sbi, bdev, blkstart, blklen);
2034 	return 0;
2035 }
2036 
2037 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
2038 				block_t blkstart, block_t blklen)
2039 {
2040 	sector_t start = blkstart, len = 0;
2041 	struct block_device *bdev;
2042 	struct seg_entry *se;
2043 	unsigned int offset;
2044 	block_t i;
2045 	int err = 0;
2046 
2047 	bdev = f2fs_target_device(sbi, blkstart, NULL);
2048 
2049 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
2050 		if (i != start) {
2051 			struct block_device *bdev2 =
2052 				f2fs_target_device(sbi, i, NULL);
2053 
2054 			if (bdev2 != bdev) {
2055 				err = __issue_discard_async(sbi, bdev,
2056 						start, len);
2057 				if (err)
2058 					return err;
2059 				bdev = bdev2;
2060 				start = i;
2061 				len = 0;
2062 			}
2063 		}
2064 
2065 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
2066 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
2067 
2068 		if (f2fs_block_unit_discard(sbi) &&
2069 				!f2fs_test_and_set_bit(offset, se->discard_map))
2070 			sbi->discard_blks--;
2071 	}
2072 
2073 	if (len)
2074 		err = __issue_discard_async(sbi, bdev, start, len);
2075 	return err;
2076 }
2077 
2078 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
2079 							bool check_only)
2080 {
2081 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2082 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
2083 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2084 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2085 	unsigned long *discard_map = (unsigned long *)se->discard_map;
2086 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
2087 	unsigned int start = 0, end = -1;
2088 	bool force = (cpc->reason & CP_DISCARD);
2089 	struct discard_entry *de = NULL;
2090 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
2091 	int i;
2092 
2093 	if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
2094 	    !f2fs_hw_support_discard(sbi) ||
2095 	    !f2fs_block_unit_discard(sbi))
2096 		return false;
2097 
2098 	if (!force) {
2099 		if (!f2fs_realtime_discard_enable(sbi) ||
2100 			(!se->valid_blocks &&
2101 				!IS_CURSEG(sbi, cpc->trim_start)) ||
2102 			SM_I(sbi)->dcc_info->nr_discards >=
2103 				SM_I(sbi)->dcc_info->max_discards)
2104 			return false;
2105 	}
2106 
2107 	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
2108 	for (i = 0; i < entries; i++)
2109 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
2110 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
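	/*
	 * dmap now marks the discard candidates: in force (FITRIM) mode,
	 * blocks that are invalid in the checkpoint and still flagged as
	 * discardable; otherwise, blocks that were valid at the last
	 * checkpoint (ckpt_map) but have since been invalidated (cur_map
	 * differs).
	 */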
2111 
2112 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
2113 				SM_I(sbi)->dcc_info->max_discards) {
2114 		start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
2115 		if (start >= BLKS_PER_SEG(sbi))
2116 			break;
2117 
2118 		end = __find_rev_next_zero_bit(dmap,
2119 						BLKS_PER_SEG(sbi), start + 1);
2120 		if (force && start && end != BLKS_PER_SEG(sbi) &&
2121 		    (end - start) < cpc->trim_minlen)
2122 			continue;
2123 
2124 		if (check_only)
2125 			return true;
2126 
2127 		if (!de) {
2128 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
2129 						GFP_F2FS_ZERO, true, NULL);
2130 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
2131 			list_add_tail(&de->list, head);
2132 		}
2133 
2134 		for (i = start; i < end; i++)
2135 			__set_bit_le(i, (void *)de->discard_map);
2136 
2137 		SM_I(sbi)->dcc_info->nr_discards += end - start;
2138 	}
2139 	return false;
2140 }
2141 
2142 static void release_discard_addr(struct discard_entry *entry)
2143 {
2144 	list_del(&entry->list);
2145 	kmem_cache_free(discard_entry_slab, entry);
2146 }
2147 
2148 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2149 {
2150 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2151 	struct discard_entry *entry, *this;
2152 
2153 	/* drop caches */
2154 	list_for_each_entry_safe(entry, this, head, list)
2155 		release_discard_addr(entry);
2156 }
2157 
2158 /*
2159  * Should call f2fs_clear_prefree_segments after the checkpoint is done.
2160  */
2161 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2162 {
2163 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2164 	unsigned int segno;
2165 
2166 	mutex_lock(&dirty_i->seglist_lock);
2167 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2168 		__set_test_and_free(sbi, segno, false);
2169 	mutex_unlock(&dirty_i->seglist_lock);
2170 }
2171 
2172 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2173 						struct cp_control *cpc)
2174 {
2175 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2176 	struct list_head *head = &dcc->entry_list;
2177 	struct discard_entry *entry, *this;
2178 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2179 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2180 	unsigned int start = 0, end = -1;
2181 	unsigned int secno, start_segno;
2182 	bool force = (cpc->reason & CP_DISCARD);
2183 	bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2184 						DISCARD_UNIT_SECTION;
2185 
2186 	if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2187 		section_alignment = true;
2188 
2189 	mutex_lock(&dirty_i->seglist_lock);
2190 
2191 	while (1) {
2192 		int i;
2193 
2194 		if (section_alignment && end != -1)
2195 			end--;
2196 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2197 		if (start >= MAIN_SEGS(sbi))
2198 			break;
2199 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2200 								start + 1);
2201 
2202 		if (section_alignment) {
2203 			start = rounddown(start, SEGS_PER_SEC(sbi));
2204 			end = roundup(end, SEGS_PER_SEC(sbi));
2205 		}
2206 
2207 		for (i = start; i < end; i++) {
2208 			if (test_and_clear_bit(i, prefree_map))
2209 				dirty_i->nr_dirty[PRE]--;
2210 		}
2211 
2212 		if (!f2fs_realtime_discard_enable(sbi))
2213 			continue;
2214 
2215 		if (force && start >= cpc->trim_start &&
2216 					(end - 1) <= cpc->trim_end)
2217 			continue;
2218 
2219 		/* Zoned devices (e.g. 2MB zones) must take the zone-based reset path below */
2220 		if (!f2fs_sb_has_blkzoned(sbi) &&
2221 		    (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
2222 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2223 				SEGS_TO_BLKS(sbi, end - start));
2224 			continue;
2225 		}
2226 next:
2227 		secno = GET_SEC_FROM_SEG(sbi, start);
2228 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
2229 		if (!IS_CURSEC(sbi, secno) &&
2230 			!get_valid_blocks(sbi, start, true))
2231 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2232 						BLKS_PER_SEC(sbi));
2233 
2234 		start = start_segno + SEGS_PER_SEC(sbi);
2235 		if (start < end)
2236 			goto next;
2237 		else
2238 			end = start - 1;
2239 	}
2240 	mutex_unlock(&dirty_i->seglist_lock);
2241 
2242 	if (!f2fs_block_unit_discard(sbi))
2243 		goto wakeup;
2244 
2245 	/* send small discards */
2246 	list_for_each_entry_safe(entry, this, head, list) {
2247 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2248 		bool is_valid = test_bit_le(0, entry->discard_map);
2249 
2250 find_next:
2251 		if (is_valid) {
2252 			next_pos = find_next_zero_bit_le(entry->discard_map,
2253 						BLKS_PER_SEG(sbi), cur_pos);
2254 			len = next_pos - cur_pos;
2255 
2256 			if (f2fs_sb_has_blkzoned(sbi) ||
2257 			    (force && len < cpc->trim_minlen))
2258 				goto skip;
2259 
2260 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2261 									len);
2262 			total_len += len;
2263 		} else {
2264 			next_pos = find_next_bit_le(entry->discard_map,
2265 						BLKS_PER_SEG(sbi), cur_pos);
2266 		}
2267 skip:
2268 		cur_pos = next_pos;
2269 		is_valid = !is_valid;
2270 
2271 		if (cur_pos < BLKS_PER_SEG(sbi))
2272 			goto find_next;
2273 
2274 		release_discard_addr(entry);
2275 		dcc->nr_discards -= total_len;
2276 	}
2277 
2278 wakeup:
2279 	wake_up_discard_thread(sbi, false);
2280 }
2281 
2282 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2283 {
2284 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2285 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2286 	int err = 0;
2287 
2288 	if (f2fs_sb_has_readonly(sbi)) {
2289 		f2fs_info(sbi,
2290 			"Skip to start discard thread for readonly image");
2291 		return 0;
2292 	}
2293 
2294 	if (!f2fs_realtime_discard_enable(sbi))
2295 		return 0;
2296 
2297 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2298 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2299 	if (IS_ERR(dcc->f2fs_issue_discard)) {
2300 		err = PTR_ERR(dcc->f2fs_issue_discard);
2301 		dcc->f2fs_issue_discard = NULL;
2302 	}
2303 
2304 	return err;
2305 }
2306 
2307 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2308 {
2309 	struct discard_cmd_control *dcc;
2310 	int err = 0, i;
2311 
2312 	if (SM_I(sbi)->dcc_info) {
2313 		dcc = SM_I(sbi)->dcc_info;
2314 		goto init_thread;
2315 	}
2316 
2317 	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2318 	if (!dcc)
2319 		return -ENOMEM;
2320 
2321 	dcc->discard_io_aware_gran = MAX_PLIST_NUM;
2322 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2323 	dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
2324 	dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE;
2325 	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT ||
2326 		F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2327 		dcc->discard_granularity = BLKS_PER_SEG(sbi);
2328 
2329 	INIT_LIST_HEAD(&dcc->entry_list);
2330 	for (i = 0; i < MAX_PLIST_NUM; i++)
2331 		INIT_LIST_HEAD(&dcc->pend_list[i]);
2332 	INIT_LIST_HEAD(&dcc->wait_list);
2333 	INIT_LIST_HEAD(&dcc->fstrim_list);
2334 	mutex_init(&dcc->cmd_lock);
2335 	atomic_set(&dcc->issued_discard, 0);
2336 	atomic_set(&dcc->queued_discard, 0);
2337 	atomic_set(&dcc->discard_cmd_cnt, 0);
2338 	dcc->nr_discards = 0;
2339 	dcc->max_discards = SEGS_TO_BLKS(sbi, MAIN_SEGS(sbi));
2340 	dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
2341 	dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
2342 	dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
2343 	dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
2344 	dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
2345 	dcc->undiscard_blks = 0;
2346 	dcc->next_pos = 0;
2347 	dcc->root = RB_ROOT_CACHED;
2348 	dcc->rbtree_check = false;
2349 
2350 	init_waitqueue_head(&dcc->discard_wait_queue);
2351 	SM_I(sbi)->dcc_info = dcc;
2352 init_thread:
2353 	err = f2fs_start_discard_thread(sbi);
2354 	if (err) {
2355 		kfree(dcc);
2356 		SM_I(sbi)->dcc_info = NULL;
2357 	}
2358 
2359 	return err;
2360 }
2361 
2362 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2363 {
2364 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2365 
2366 	if (!dcc)
2367 		return;
2368 
2369 	f2fs_stop_discard_thread(sbi);
2370 
2371 	/*
2372 	 * Recovery can cache discard commands, so in the error path of
2373 	 * fill_super(), give them a chance to be handled.
2374 	 */
2375 	f2fs_issue_discard_timeout(sbi);
2376 
2377 	kfree(dcc);
2378 	SM_I(sbi)->dcc_info = NULL;
2379 }
2380 
2381 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2382 {
2383 	struct sit_info *sit_i = SIT_I(sbi);
2384 
2385 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2386 		sit_i->dirty_sentries++;
2387 		return false;
2388 	}
2389 
2390 	return true;
2391 }
2392 
2393 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2394 					unsigned int segno, int modified)
2395 {
2396 	struct seg_entry *se = get_seg_entry(sbi, segno);
2397 
2398 	se->type = type;
2399 	if (modified)
2400 		__mark_sit_entry_dirty(sbi, segno);
2401 }
2402 
2403 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2404 								block_t blkaddr)
2405 {
2406 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2407 
2408 	if (segno == NULL_SEGNO)
2409 		return 0;
2410 	return get_seg_entry(sbi, segno)->mtime;
2411 }
2412 
2413 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2414 						unsigned long long old_mtime)
2415 {
2416 	struct seg_entry *se;
2417 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2418 	unsigned long long ctime = get_mtime(sbi, false);
2419 	unsigned long long mtime = old_mtime ? old_mtime : ctime;
2420 
2421 	if (segno == NULL_SEGNO)
2422 		return;
2423 
2424 	se = get_seg_entry(sbi, segno);
2425 
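	/*
	 * Fold the new reference time into a running average so each valid
	 * block contributes equally to the segment age: e.g. a segment with
	 * mtime 100 over 3 valid blocks, touched at time 200, moves to
	 * (100 * 3 + 200) / 4 = 125.
	 */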
2426 	if (!se->mtime)
2427 		se->mtime = mtime;
2428 	else
2429 		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2430 						se->valid_blocks + 1);
2431 
2432 	if (ctime > SIT_I(sbi)->max_mtime)
2433 		SIT_I(sbi)->max_mtime = ctime;
2434 }
2435 
2436 /*
2437  * NOTE: when updating multiple blocks at the same time, please ensure
2438  * that the consecutive input blocks belong to the same segment.
2439  */
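/*
 * For example, with the default 2MB segments (512 x 4KB blocks), releasing a
 * whole segment is a single call with del == -512; a range crossing a segment
 * boundary must be split by the caller, as f2fs_invalidate_blocks() does below.
 */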
2440 static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se,
2441 				block_t blkaddr, unsigned int offset, int del)
2442 {
2443 	bool exist;
2444 #ifdef CONFIG_F2FS_CHECK_FS
2445 	bool mir_exist;
2446 #endif
2447 	int i;
2448 	int del_count = -del;
2449 
2450 	f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1));
2451 
2452 	for (i = 0; i < del_count; i++) {
2453 		exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map);
2454 #ifdef CONFIG_F2FS_CHECK_FS
2455 		mir_exist = f2fs_test_and_clear_bit(offset + i,
2456 						se->cur_valid_map_mir);
2457 		if (unlikely(exist != mir_exist)) {
2458 			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2459 				blkaddr + i, exist);
2460 			f2fs_bug_on(sbi, 1);
2461 		}
2462 #endif
2463 		if (unlikely(!exist)) {
2464 			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i);
2465 			f2fs_bug_on(sbi, 1);
2466 			se->valid_blocks++;
2467 			del += 1;
2468 		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2469 			/*
2470 			 * If checkpoints are off, we must not reuse data that
2471 			 * was used in the previous checkpoint. If it was used
2472 			 * before, we must track that to know how much space we
2473 			 * really have.
2474 			 */
2475 			if (f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
2476 				spin_lock(&sbi->stat_lock);
2477 				sbi->unusable_block_count++;
2478 				spin_unlock(&sbi->stat_lock);
2479 			}
2480 		}
2481 
2482 		if (f2fs_block_unit_discard(sbi) &&
2483 				f2fs_test_and_clear_bit(offset + i, se->discard_map))
2484 			sbi->discard_blks++;
2485 
2486 		if (!f2fs_test_bit(offset + i, se->ckpt_valid_map))
2487 			se->ckpt_valid_blocks -= 1;
2488 	}
2489 
2490 	return del;
2491 }
2492 
2493 static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se,
2494 				block_t blkaddr, unsigned int offset, int del)
2495 {
2496 	bool exist;
2497 #ifdef CONFIG_F2FS_CHECK_FS
2498 	bool mir_exist;
2499 #endif
2500 
2501 	exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2502 #ifdef CONFIG_F2FS_CHECK_FS
2503 	mir_exist = f2fs_test_and_set_bit(offset,
2504 					se->cur_valid_map_mir);
2505 	if (unlikely(exist != mir_exist)) {
2506 		f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2507 			blkaddr, exist);
2508 		f2fs_bug_on(sbi, 1);
2509 	}
2510 #endif
2511 	if (unlikely(exist)) {
2512 		f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr);
2513 		f2fs_bug_on(sbi, 1);
2514 		se->valid_blocks--;
2515 		del = 0;
2516 	}
2517 
2518 	if (f2fs_block_unit_discard(sbi) &&
2519 			!f2fs_test_and_set_bit(offset, se->discard_map))
2520 		sbi->discard_blks--;
2521 
2522 	/*
2523 	 * SSR should never reuse a block which is checkpointed
2524 	 * or newly invalidated.
2525 	 */
2526 	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2527 		if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2528 			se->ckpt_valid_blocks++;
2529 	}
2530 
2531 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2532 		se->ckpt_valid_blocks += del;
2533 
2534 	return del;
2535 }
2536 
2537 /*
2538  * When releasing blocks, this function can update multiple consecutive blocks
2539  * at a time, but note that all of these consecutive blocks must belong to the
2540  * same segment.
2541  */
2542 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2543 {
2544 	struct seg_entry *se;
2545 	unsigned int segno, offset;
2546 	long int new_vblocks;
2547 
2548 	segno = GET_SEGNO(sbi, blkaddr);
2549 	if (segno == NULL_SEGNO)
2550 		return;
2551 
2552 	se = get_seg_entry(sbi, segno);
2553 	new_vblocks = se->valid_blocks + del;
2554 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2555 
2556 	f2fs_bug_on(sbi, (new_vblocks < 0 ||
2557 			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2558 
2559 	se->valid_blocks = new_vblocks;
2560 
2561 	/* Update valid block bitmap */
2562 	if (del > 0) {
2563 		del = update_sit_entry_for_alloc(sbi, se, blkaddr, offset, del);
2564 	} else {
2565 		del = update_sit_entry_for_release(sbi, se, blkaddr, offset, del);
2566 	}
2567 
2568 	__mark_sit_entry_dirty(sbi, segno);
2569 
2570 	/* update total number of valid blocks to be written in ckpt area */
2571 	SIT_I(sbi)->written_valid_blocks += del;
2572 
2573 	if (__is_large_section(sbi))
2574 		get_sec_entry(sbi, segno)->valid_blocks += del;
2575 }
2576 
2577 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
2578 				unsigned int len)
2579 {
2580 	unsigned int segno = GET_SEGNO(sbi, addr);
2581 	struct sit_info *sit_i = SIT_I(sbi);
2582 	block_t addr_start = addr, addr_end = addr + len - 1;
2583 	unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1;
2584 	unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt;
2585 
2586 	f2fs_bug_on(sbi, addr == NULL_ADDR);
2587 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2588 		return;
2589 
2590 	f2fs_invalidate_internal_cache(sbi, addr, len);
2591 
2592 	/* add it into sit main buffer */
2593 	down_write(&sit_i->sentry_lock);
2594 
2595 	if (seg_num == 1)
2596 		cnt = len;
2597 	else
2598 		cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr);
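	/*
	 * Split the range per segment: a partial head from @addr to its
	 * segment end, full segments in the middle, and a partial tail
	 * ending at @addr_end; each piece gets one update_sit_entry() call.
	 */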
2599 
2600 	do {
2601 		update_segment_mtime(sbi, addr_start, 0);
2602 		update_sit_entry(sbi, addr_start, -cnt);
2603 
2604 		/* add it into dirty seglist */
2605 		locate_dirty_segment(sbi, segno);
2606 
2607 		/* update @addr_start, @cnt and @segno */
2608 		addr_start = START_BLOCK(sbi, ++segno);
2609 		if (++i == seg_num)
2610 			cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1;
2611 		else
2612 			cnt = max_blocks;
2613 	} while (i <= seg_num);
2614 
2615 	up_write(&sit_i->sentry_lock);
2616 }
2617 
2618 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2619 {
2620 	struct sit_info *sit_i = SIT_I(sbi);
2621 	unsigned int segno, offset;
2622 	struct seg_entry *se;
2623 	bool is_cp = false;
2624 
2625 	if (!__is_valid_data_blkaddr(blkaddr))
2626 		return true;
2627 
2628 	down_read(&sit_i->sentry_lock);
2629 
2630 	segno = GET_SEGNO(sbi, blkaddr);
2631 	se = get_seg_entry(sbi, segno);
2632 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2633 
2634 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2635 		is_cp = true;
2636 
2637 	up_read(&sit_i->sentry_lock);
2638 
2639 	return is_cp;
2640 }
2641 
2642 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
2643 {
2644 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2645 
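	/*
	 * An SSR log can fill holes anywhere in the segment, so all
	 * BLKS_PER_SEG() summary slots must be assumed valid; an LFS log is
	 * dense, so only entries up to next_blkoff matter.
	 */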
2646 	if (sbi->ckpt->alloc_type[type] == SSR)
2647 		return BLKS_PER_SEG(sbi);
2648 	return curseg->next_blkoff;
2649 }
2650 
2651 /*
2652  * Calculate the number of current summary pages for writing
2653  */
2654 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2655 {
2656 	int valid_sum_count = 0;
2657 	int i, sum_in_page;
2658 
2659 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2660 		if (sbi->ckpt->alloc_type[i] != SSR && for_ra)
2661 			valid_sum_count +=
2662 				le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2663 		else
2664 			valid_sum_count += f2fs_curseg_valid_blocks(sbi, i);
2665 	}
2666 
2667 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
2668 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
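	/*
	 * Rough worked example, assuming 4KB pages and the summary layout
	 * constants in f2fs.h (SUMMARY_SIZE == 7, SUM_JOURNAL_SIZE == 253,
	 * SUM_FOOTER_SIZE == 5): sum_in_page = (4096 - 506 - 5) / 7 = 512,
	 * i.e. one page holds a full segment's summaries plus both journals.
	 */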
2669 	if (valid_sum_count <= sum_in_page)
2670 		return 1;
2671 	else if ((valid_sum_count - sum_in_page) <=
2672 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2673 		return 2;
2674 	return 3;
2675 }
2676 
2677 /*
2678  * Caller should put this summary page
2679  */
2680 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2681 {
2682 	if (unlikely(f2fs_cp_error(sbi)))
2683 		return ERR_PTR(-EIO);
2684 	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2685 }
2686 
2687 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2688 					void *src, block_t blk_addr)
2689 {
2690 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2691 
2692 	memcpy(page_address(page), src, PAGE_SIZE);
2693 	set_page_dirty(page);
2694 	f2fs_put_page(page, 1);
2695 }
2696 
2697 static void write_sum_page(struct f2fs_sb_info *sbi,
2698 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2699 {
2700 	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2701 }
2702 
2703 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2704 						int type, block_t blk_addr)
2705 {
2706 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2707 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2708 	struct f2fs_summary_block *src = curseg->sum_blk;
2709 	struct f2fs_summary_block *dst;
2710 
2711 	dst = (struct f2fs_summary_block *)page_address(page);
2712 	memset(dst, 0, PAGE_SIZE);
2713 
2714 	mutex_lock(&curseg->curseg_mutex);
2715 
2716 	down_read(&curseg->journal_rwsem);
2717 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2718 	up_read(&curseg->journal_rwsem);
2719 
2720 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2721 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2722 
2723 	mutex_unlock(&curseg->curseg_mutex);
2724 
2725 	set_page_dirty(page);
2726 	f2fs_put_page(page, 1);
2727 }
2728 
2729 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2730 				struct curseg_info *curseg)
2731 {
2732 	unsigned int segno = curseg->segno + 1;
2733 	struct free_segmap_info *free_i = FREE_I(sbi);
2734 
2735 	if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
2736 		return !test_bit(segno, free_i->free_segmap);
2737 	return 0;
2738 }
2739 
2740 /*
2741  * Find a new segment from the free segments bitmap in the right order.
2742  * This function should return with success; otherwise, BUG.
2743  */
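/*
 * Search order (a sketch of the logic below): first the next free segment
 * inside the current section, then a free section at or after the hint, then
 * any free section; if the chosen zone is already used by another current
 * log, retry from the next zone.
 */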
2744 static int get_new_segment(struct f2fs_sb_info *sbi,
2745 			unsigned int *newseg, bool new_sec, bool pinning)
2746 {
2747 	struct free_segmap_info *free_i = FREE_I(sbi);
2748 	unsigned int segno, secno, zoneno;
2749 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2750 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2751 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2752 	bool init = true;
2753 	int i;
2754 	int ret = 0;
2755 
2756 	spin_lock(&free_i->segmap_lock);
2757 
2758 	if (time_to_inject(sbi, FAULT_NO_SEGMENT)) {
2759 		ret = -ENOSPC;
2760 		goto out_unlock;
2761 	}
2762 
2763 	if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
2764 		segno = find_next_zero_bit(free_i->free_segmap,
2765 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2766 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2767 			goto got_it;
2768 	}
2769 
2770 #ifdef CONFIG_BLK_DEV_ZONED
2771 	/*
2772 	 * If we format f2fs on zoned storage, let's try to get pinned sections
2773 	 * from the beginning of the storage, which should be a conventional zone.
2774 	 */
2775 	if (f2fs_sb_has_blkzoned(sbi)) {
2776 		/* Prioritize writing to conventional zones */
2777 		if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
2778 			segno = 0;
2779 		else
2780 			segno = max(sbi->first_zoned_segno, *newseg);
2781 		hint = GET_SEC_FROM_SEG(sbi, segno);
2782 	}
2783 #endif
2784 
2785 find_other_zone:
2786 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2787 
2788 #ifdef CONFIG_BLK_DEV_ZONED
2789 	if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
2790 		/* Write only to sequential zones */
2791 		if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
2792 			hint = GET_SEC_FROM_SEG(sbi, sbi->first_zoned_segno);
2793 			secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2794 		} else
2795 			secno = find_first_zero_bit(free_i->free_secmap,
2796 								MAIN_SECS(sbi));
2797 		if (secno >= MAIN_SECS(sbi)) {
2798 			ret = -ENOSPC;
2799 			f2fs_bug_on(sbi, 1);
2800 			goto out_unlock;
2801 		}
2802 	}
2803 #endif
2804 
2805 	if (secno >= MAIN_SECS(sbi)) {
2806 		secno = find_first_zero_bit(free_i->free_secmap,
2807 							MAIN_SECS(sbi));
2808 		if (secno >= MAIN_SECS(sbi)) {
2809 			ret = -ENOSPC;
2810 			f2fs_bug_on(sbi, !pinning);
2811 			goto out_unlock;
2812 		}
2813 	}
2814 	segno = GET_SEG_FROM_SEC(sbi, secno);
2815 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2816 
2817 	/* give up on finding another zone */
2818 	if (!init)
2819 		goto got_it;
2820 	if (sbi->secs_per_zone == 1)
2821 		goto got_it;
2822 	if (zoneno == old_zoneno)
2823 		goto got_it;
2824 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2825 		if (CURSEG_I(sbi, i)->zone == zoneno)
2826 			break;
2827 
2828 	if (i < NR_CURSEG_TYPE) {
2829 		/* zone is in use, try another */
2830 		if (zoneno + 1 >= total_zones)
2831 			hint = 0;
2832 		else
2833 			hint = (zoneno + 1) * sbi->secs_per_zone;
2834 		init = false;
2835 		goto find_other_zone;
2836 	}
2837 got_it:
2838 	/* set it as dirty segment in free segmap */
2839 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2840 
2841 	/* no free section in conventional zone */
2842 	if (new_sec && pinning &&
2843 		!f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
2844 		ret = -EAGAIN;
2845 		goto out_unlock;
2846 	}
2847 	__set_inuse(sbi, segno);
2848 	*newseg = segno;
2849 out_unlock:
2850 	spin_unlock(&free_i->segmap_lock);
2851 
2852 	if (ret == -ENOSPC && !pinning)
2853 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
2854 	return ret;
2855 }
2856 
2857 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2858 {
2859 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2860 	struct summary_footer *sum_footer;
2861 	unsigned short seg_type = curseg->seg_type;
2862 
2863 	/* only happens when get_new_segment() fails */
2864 	if (curseg->next_segno == NULL_SEGNO)
2865 		return;
2866 
2867 	curseg->inited = true;
2868 	curseg->segno = curseg->next_segno;
2869 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2870 	curseg->next_blkoff = 0;
2871 	curseg->next_segno = NULL_SEGNO;
2872 
2873 	sum_footer = &(curseg->sum_blk->footer);
2874 	memset(sum_footer, 0, sizeof(struct summary_footer));
2875 
2876 	sanity_check_seg_type(sbi, seg_type);
2877 
2878 	if (IS_DATASEG(seg_type))
2879 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2880 	if (IS_NODESEG(seg_type))
2881 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2882 	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2883 }
2884 
2885 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2886 {
2887 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2888 	unsigned short seg_type = curseg->seg_type;
2889 
2890 	sanity_check_seg_type(sbi, seg_type);
2891 	if (__is_large_section(sbi)) {
2892 		if (f2fs_need_rand_seg(sbi)) {
2893 			unsigned int hint = GET_SEC_FROM_SEG(sbi, curseg->segno);
2894 
2895 			if (GET_SEC_FROM_SEG(sbi, curseg->segno + 1) != hint)
2896 				return curseg->segno;
2897 			return get_random_u32_inclusive(curseg->segno + 1,
2898 					GET_SEG_FROM_SEC(sbi, hint + 1) - 1);
2899 		}
2900 		return curseg->segno;
2901 	} else if (f2fs_need_rand_seg(sbi)) {
2902 		return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
2903 	}
2904 
2905 	/* an inmem log may not be located on any segment after mount */
2906 	if (!curseg->inited)
2907 		return 0;
2908 
2909 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2910 		return 0;
2911 
2912 	if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
2913 		return 0;
2914 
2915 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2916 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2917 
2918 	/* find segments from 0 to reuse freed segments */
2919 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2920 		return 0;
2921 
2922 	return curseg->segno;
2923 }
2924 
2925 static void reset_curseg_fields(struct curseg_info *curseg)
2926 {
2927 	curseg->inited = false;
2928 	curseg->segno = NULL_SEGNO;
2929 	curseg->next_segno = 0;
2930 }
2931 
2932 /*
2933  * Allocate a current working segment.
2934  * This function always allocates a free segment in LFS manner.
2935  */
2936 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2937 {
2938 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2939 	unsigned int segno = curseg->segno;
2940 	bool pinning = type == CURSEG_COLD_DATA_PINNED;
2941 	int ret;
2942 
2943 	if (curseg->inited)
2944 		write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
2945 
2946 	segno = __get_next_segno(sbi, type);
2947 	ret = get_new_segment(sbi, &segno, new_sec, pinning);
2948 	if (ret) {
2949 		if (ret == -ENOSPC)
2950 			reset_curseg_fields(curseg);
2951 		return ret;
2952 	}
2953 
2954 	curseg->next_segno = segno;
2955 	reset_curseg(sbi, type, 1);
2956 	curseg->alloc_type = LFS;
2957 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2958 		curseg->fragment_remained_chunk =
2959 				get_random_u32_inclusive(1, sbi->max_fragment_chunk);
2960 	return 0;
2961 }
2962 
2963 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2964 					int segno, block_t start)
2965 {
2966 	struct seg_entry *se = get_seg_entry(sbi, segno);
2967 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2968 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2969 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2970 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2971 	int i;
2972 
2973 	for (i = 0; i < entries; i++)
2974 		target_map[i] = ckpt_map[i] | cur_map[i];
2975 
2976 	return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
2977 }
2978 
2979 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
2980 		struct curseg_info *seg)
2981 {
2982 	return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
2983 }
2984 
2985 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2986 {
2987 	return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
2988 }
2989 
2990 /*
2991  * This function always allocates a used segment(from dirty seglist) by SSR
2992  * This function always allocates a used segment (from the dirty seglist) in SSR
2993  * manner, so it should recover the existing segment information of valid blocks.
2994 static int change_curseg(struct f2fs_sb_info *sbi, int type)
2995 {
2996 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2997 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2998 	unsigned int new_segno = curseg->next_segno;
2999 	struct f2fs_summary_block *sum_node;
3000 	struct page *sum_page;
3001 
3002 	if (curseg->inited)
3003 		write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
3004 
3005 	__set_test_and_inuse(sbi, new_segno);
3006 
3007 	mutex_lock(&dirty_i->seglist_lock);
3008 	__remove_dirty_segment(sbi, new_segno, PRE);
3009 	__remove_dirty_segment(sbi, new_segno, DIRTY);
3010 	mutex_unlock(&dirty_i->seglist_lock);
3011 
3012 	reset_curseg(sbi, type, 1);
3013 	curseg->alloc_type = SSR;
3014 	curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
3015 
3016 	sum_page = f2fs_get_sum_page(sbi, new_segno);
3017 	if (IS_ERR(sum_page)) {
3018 		/* GC won't be able to use stale summary pages due to cp_error */
3019 		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
3020 		return PTR_ERR(sum_page);
3021 	}
3022 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
3023 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
3024 	f2fs_put_page(sum_page, 1);
3025 	return 0;
3026 }
3027 
3028 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
3029 				int alloc_mode, unsigned long long age);
3030 
3031 static int get_atssr_segment(struct f2fs_sb_info *sbi, int type,
3032 					int target_type, int alloc_mode,
3033 					unsigned long long age)
3034 {
3035 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3036 	int ret = 0;
3037 
3038 	curseg->seg_type = target_type;
3039 
3040 	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
3041 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
3042 
3043 		curseg->seg_type = se->type;
3044 		ret = change_curseg(sbi, type);
3045 	} else {
3046 		/* allocate cold segment by default */
3047 		curseg->seg_type = CURSEG_COLD_DATA;
3048 		ret = new_curseg(sbi, type, true);
3049 	}
3050 	stat_inc_seg_type(sbi, curseg);
3051 	return ret;
3052 }
3053 
3054 static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi, bool force)
3055 {
3056 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
3057 	int ret = 0;
3058 
3059 	if (!sbi->am.atgc_enabled && !force)
3060 		return 0;
3061 
3062 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3063 
3064 	mutex_lock(&curseg->curseg_mutex);
3065 	down_write(&SIT_I(sbi)->sentry_lock);
3066 
3067 	ret = get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC,
3068 					CURSEG_COLD_DATA, SSR, 0);
3069 
3070 	up_write(&SIT_I(sbi)->sentry_lock);
3071 	mutex_unlock(&curseg->curseg_mutex);
3072 
3073 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3074 	return ret;
3075 }
3076 
3077 int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
3078 {
3079 	return __f2fs_init_atgc_curseg(sbi, false);
3080 }
3081 
3082 int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi)
3083 {
3084 	int ret;
3085 
3086 	if (!test_opt(sbi, ATGC))
3087 		return 0;
3088 	if (sbi->am.atgc_enabled)
3089 		return 0;
3090 	if (le64_to_cpu(F2FS_CKPT(sbi)->elapsed_time) <
3091 			sbi->am.age_threshold)
3092 		return 0;
3093 
3094 	ret = __f2fs_init_atgc_curseg(sbi, true);
3095 	if (!ret) {
3096 		sbi->am.atgc_enabled = true;
3097 		f2fs_info(sbi, "reenabled age threshold GC");
3098 	}
3099 	return ret;
3100 }
3101 
3102 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
3103 {
3104 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3105 
3106 	mutex_lock(&curseg->curseg_mutex);
3107 	if (!curseg->inited)
3108 		goto out;
3109 
3110 	if (get_valid_blocks(sbi, curseg->segno, false)) {
3111 		write_sum_page(sbi, curseg->sum_blk,
3112 				GET_SUM_BLOCK(sbi, curseg->segno));
3113 	} else {
3114 		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
3115 		__set_test_and_free(sbi, curseg->segno, true);
3116 		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
3117 	}
3118 out:
3119 	mutex_unlock(&curseg->curseg_mutex);
3120 }
3121 
3122 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
3123 {
3124 	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
3125 
3126 	if (sbi->am.atgc_enabled)
3127 		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
3128 }
3129 
3130 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
3131 {
3132 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3133 
3134 	mutex_lock(&curseg->curseg_mutex);
3135 	if (!curseg->inited)
3136 		goto out;
3137 	if (get_valid_blocks(sbi, curseg->segno, false))
3138 		goto out;
3139 
3140 	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
3141 	__set_test_and_inuse(sbi, curseg->segno);
3142 	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
3143 out:
3144 	mutex_unlock(&curseg->curseg_mutex);
3145 }
3146 
3147 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
3148 {
3149 	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
3150 
3151 	if (sbi->am.atgc_enabled)
3152 		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
3153 }
3154 
3155 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
3156 				int alloc_mode, unsigned long long age)
3157 {
3158 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3159 	unsigned segno = NULL_SEGNO;
3160 	unsigned short seg_type = curseg->seg_type;
3161 	int i, cnt;
3162 	bool reversed = false;
3163 
3164 	sanity_check_seg_type(sbi, seg_type);
3165 
3166 	/* f2fs_need_SSR() already forces us to do this */
3167 	if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type,
3168 				alloc_mode, age, false)) {
3169 		curseg->next_segno = segno;
3170 		return 1;
3171 	}
3172 
3173 	/* For node segments, let's do SSR more intensively */
3174 	if (IS_NODESEG(seg_type)) {
3175 		if (seg_type >= CURSEG_WARM_NODE) {
3176 			reversed = true;
3177 			i = CURSEG_COLD_NODE;
3178 		} else {
3179 			i = CURSEG_HOT_NODE;
3180 		}
3181 		cnt = NR_CURSEG_NODE_TYPE;
3182 	} else {
3183 		if (seg_type >= CURSEG_WARM_DATA) {
3184 			reversed = true;
3185 			i = CURSEG_COLD_DATA;
3186 		} else {
3187 			i = CURSEG_HOT_DATA;
3188 		}
3189 		cnt = NR_CURSEG_DATA_TYPE;
3190 	}
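	/*
	 * e.g. a WARM_NODE log probes COLD_NODE first and then HOT_NODE
	 * (reversed order), while a HOT_DATA log probes WARM_DATA and then
	 * COLD_DATA; the log's own temperature is skipped below.
	 */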
3191 
3192 	for (; cnt-- > 0; reversed ? i-- : i++) {
3193 		if (i == seg_type)
3194 			continue;
3195 		if (!f2fs_get_victim(sbi, &segno, BG_GC, i,
3196 					alloc_mode, age, false)) {
3197 			curseg->next_segno = segno;
3198 			return 1;
3199 		}
3200 	}
3201 
3202 	/* find valid_blocks=0 in dirty list */
3203 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
3204 		segno = get_free_segment(sbi);
3205 		if (segno != NULL_SEGNO) {
3206 			curseg->next_segno = segno;
3207 			return 1;
3208 		}
3209 	}
3210 	return 0;
3211 }
3212 
3213 static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
3214 {
3215 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3216 
3217 	if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
3218 	    curseg->seg_type == CURSEG_WARM_NODE)
3219 		return true;
3220 	if (curseg->alloc_type == LFS && is_next_segment_free(sbi, curseg) &&
3221 	    likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3222 		return true;
3223 	if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
3224 		return true;
3225 	return false;
3226 }
3227 
3228 int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3229 					unsigned int start, unsigned int end)
3230 {
3231 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3232 	unsigned int segno;
3233 	int ret = 0;
3234 
3235 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3236 	mutex_lock(&curseg->curseg_mutex);
3237 	down_write(&SIT_I(sbi)->sentry_lock);
3238 
3239 	segno = CURSEG_I(sbi, type)->segno;
3240 	if (segno < start || segno > end)
3241 		goto unlock;
3242 
3243 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
3244 		ret = change_curseg(sbi, type);
3245 	else
3246 		ret = new_curseg(sbi, type, true);
3247 
3248 	stat_inc_seg_type(sbi, curseg);
3249 
3250 	locate_dirty_segment(sbi, segno);
3251 unlock:
3252 	up_write(&SIT_I(sbi)->sentry_lock);
3253 
3254 	if (segno != curseg->segno)
3255 		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3256 			    type, segno, curseg->segno);
3257 
3258 	mutex_unlock(&curseg->curseg_mutex);
3259 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3260 	return ret;
3261 }
3262 
3263 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3264 						bool new_sec, bool force)
3265 {
3266 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3267 	unsigned int old_segno;
3268 	int err = 0;
3269 
3270 	if (type == CURSEG_COLD_DATA_PINNED && !curseg->inited)
3271 		goto allocate;
3272 
3273 	if (!force && curseg->inited &&
3274 	    !curseg->next_blkoff &&
3275 	    !get_valid_blocks(sbi, curseg->segno, new_sec) &&
3276 	    !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3277 		return 0;
3278 
3279 allocate:
3280 	old_segno = curseg->segno;
3281 	err = new_curseg(sbi, type, true);
3282 	if (err)
3283 		return err;
3284 	stat_inc_seg_type(sbi, curseg);
3285 	locate_dirty_segment(sbi, old_segno);
3286 	return 0;
3287 }
3288 
3289 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3290 {
3291 	int ret;
3292 
3293 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3294 	down_write(&SIT_I(sbi)->sentry_lock);
3295 	ret = __allocate_new_segment(sbi, type, true, force);
3296 	up_write(&SIT_I(sbi)->sentry_lock);
3297 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3298 
3299 	return ret;
3300 }
3301 
3302 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
3303 {
3304 	int err;
3305 	bool gc_required = true;
3306 
3307 retry:
3308 	f2fs_lock_op(sbi);
3309 	err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3310 	f2fs_unlock_op(sbi);
3311 
3312 	if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
3313 		f2fs_down_write(&sbi->gc_lock);
3314 		err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk),
3315 				true, ZONED_PIN_SEC_REQUIRED_COUNT);
3316 		f2fs_up_write(&sbi->gc_lock);
3317 
3318 		gc_required = false;
3319 		if (!err)
3320 			goto retry;
3321 	}
3322 
3323 	return err;
3324 }
3325 
3326 int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3327 {
3328 	int i;
3329 	int err = 0;
3330 
3331 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3332 	down_write(&SIT_I(sbi)->sentry_lock);
3333 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
3334 		err += __allocate_new_segment(sbi, i, false, false);
3335 	up_write(&SIT_I(sbi)->sentry_lock);
3336 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3337 
3338 	return err;
3339 }
3340 
3341 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3342 						struct cp_control *cpc)
3343 {
3344 	__u64 trim_start = cpc->trim_start;
3345 	bool has_candidate = false;
3346 
3347 	down_write(&SIT_I(sbi)->sentry_lock);
3348 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
3349 		if (add_discard_addrs(sbi, cpc, true)) {
3350 			has_candidate = true;
3351 			break;
3352 		}
3353 	}
3354 	up_write(&SIT_I(sbi)->sentry_lock);
3355 
3356 	cpc->trim_start = trim_start;
3357 	return has_candidate;
3358 }
3359 
3360 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3361 					struct discard_policy *dpolicy,
3362 					unsigned int start, unsigned int end)
3363 {
3364 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3365 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
3366 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
3367 	struct discard_cmd *dc;
3368 	struct blk_plug plug;
3369 	int issued;
3370 	unsigned int trimmed = 0;
3371 
3372 next:
3373 	issued = 0;
3374 
3375 	mutex_lock(&dcc->cmd_lock);
3376 	if (unlikely(dcc->rbtree_check))
3377 		f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
3378 
3379 	dc = __lookup_discard_cmd_ret(&dcc->root, start,
3380 				&prev_dc, &next_dc, &insert_p, &insert_parent);
3381 	if (!dc)
3382 		dc = next_dc;
3383 
3384 	blk_start_plug(&plug);
3385 
3386 	while (dc && dc->di.lstart <= end) {
3387 		struct rb_node *node;
3388 		int err = 0;
3389 
3390 		if (dc->di.len < dpolicy->granularity)
3391 			goto skip;
3392 
3393 		if (dc->state != D_PREP) {
3394 			list_move_tail(&dc->list, &dcc->fstrim_list);
3395 			goto skip;
3396 		}
3397 
3398 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3399 
3400 		if (issued >= dpolicy->max_requests) {
3401 			start = dc->di.lstart + dc->di.len;
3402 
3403 			if (err)
3404 				__remove_discard_cmd(sbi, dc);
3405 
3406 			blk_finish_plug(&plug);
3407 			mutex_unlock(&dcc->cmd_lock);
3408 			trimmed += __wait_all_discard_cmd(sbi, NULL);
3409 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
3410 			goto next;
3411 		}
3412 skip:
3413 		node = rb_next(&dc->rb_node);
3414 		if (err)
3415 			__remove_discard_cmd(sbi, dc);
3416 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3417 
3418 		if (fatal_signal_pending(current))
3419 			break;
3420 	}
3421 
3422 	blk_finish_plug(&plug);
3423 	mutex_unlock(&dcc->cmd_lock);
3424 
3425 	return trimmed;
3426 }
3427 
3428 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3429 {
3430 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
3431 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3432 	unsigned int start_segno, end_segno;
3433 	block_t start_block, end_block;
3434 	struct cp_control cpc;
3435 	struct discard_policy dpolicy;
3436 	unsigned long long trimmed = 0;
3437 	int err = 0;
3438 	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3439 
3440 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3441 		return -EINVAL;
3442 
3443 	if (end < MAIN_BLKADDR(sbi))
3444 		goto out;
3445 
3446 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3447 		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3448 		return -EFSCORRUPTED;
3449 	}
3450 
3451 	/* start/end segment number in main_area */
3452 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3453 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3454 						GET_SEGNO(sbi, end);
3455 	if (need_align) {
3456 		start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
3457 		end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
3458 	}
3459 
3460 	cpc.reason = CP_DISCARD;
3461 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3462 	cpc.trim_start = start_segno;
3463 	cpc.trim_end = end_segno;
3464 
3465 	if (sbi->discard_blks == 0)
3466 		goto out;
3467 
3468 	f2fs_down_write(&sbi->gc_lock);
3469 	stat_inc_cp_call_count(sbi, TOTAL_CALL);
3470 	err = f2fs_write_checkpoint(sbi, &cpc);
3471 	f2fs_up_write(&sbi->gc_lock);
3472 	if (err)
3473 		goto out;
3474 
3475 	/*
3476 	 * We queued discard candidates, but we don't need to wait for all of
3477 	 * them: with the runtime discard option enabled they will be issued
3478 	 * at idle time anyway. The user configuration evidently relies on
3479 	 * runtime discard or periodic fstrim rather than this synchronous wait.
3480 	 */
3481 	if (f2fs_realtime_discard_enable(sbi))
3482 		goto out;
3483 
3484 	start_block = START_BLOCK(sbi, start_segno);
3485 	end_block = START_BLOCK(sbi, end_segno + 1);
3486 
3487 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3488 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3489 					start_block, end_block);
3490 
3491 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3492 					start_block, end_block);
3493 out:
3494 	if (!err)
3495 		range->len = F2FS_BLK_TO_BYTES(trimmed);
3496 	return err;
3497 }
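
/*
 * Illustrative sketch (not part of this file): f2fs_trim_fs() is normally
 * reached through the generic FITRIM ioctl. A minimal userspace caller,
 * with an assumed mount point, looks roughly like:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <limits.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>		// FITRIM, struct fstrim_range
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,	// whole filesystem
 *		.minlen = 4096,		// feeds cpc.trim_minlen above
 *	};
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
 *
 * On success, range.len is rewritten to F2FS_BLK_TO_BYTES(trimmed), i.e.
 * the number of bytes actually discarded.
 */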
3498 
3499 int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint)
3500 {
3501 	if (F2FS_OPTION(sbi).active_logs == 2)
3502 		return CURSEG_HOT_DATA;
3503 	else if (F2FS_OPTION(sbi).active_logs == 4)
3504 		return CURSEG_COLD_DATA;
3505 
3506 	/* active_logs == 6 */
3507 	switch (hint) {
3508 	case WRITE_LIFE_SHORT:
3509 		return CURSEG_HOT_DATA;
3510 	case WRITE_LIFE_EXTREME:
3511 		return CURSEG_COLD_DATA;
3512 	default:
3513 		return CURSEG_WARM_DATA;
3514 	}
3515 }
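
/*
 * Illustrative sketch (not part of this file): with six active logs, the
 * per-file write hint set via fcntl() selects among the data logs exactly
 * as in the switch above. A hedged userspace example:
 *
 *	#include <stdint.h>
 *	#include <fcntl.h>		// F_SET_RW_HINT, RWH_WRITE_LIFE_*
 *
 *	uint64_t hint = RWH_WRITE_LIFE_SHORT;	// -> CURSEG_HOT_DATA
 *	fcntl(fd, F_SET_RW_HINT, &hint);
 *
 * RWH_WRITE_LIFE_EXTREME selects CURSEG_COLD_DATA; every other value falls
 * through to CURSEG_WARM_DATA.
 */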
3516 
3517 /*
3518  * This returns the write hint for each segment type. These hints are
3519  * passed down to the block layer as below by default.
3520  *
3521  * User                  F2FS                     Block
3522  * ----                  ----                     -----
3523  *                       META                     WRITE_LIFE_NONE|REQ_META
3524  *                       HOT_NODE                 WRITE_LIFE_NONE
3525  *                       WARM_NODE                WRITE_LIFE_MEDIUM
3526  *                       COLD_NODE                WRITE_LIFE_LONG
3527  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
3528  * extension list        "                        "
3529  *
3530  * -- buffered io
3531  *                       COLD_DATA                WRITE_LIFE_EXTREME
3532  *                       HOT_DATA                 WRITE_LIFE_SHORT
3533  *                       WARM_DATA                WRITE_LIFE_NOT_SET
3534  *
3535  * -- direct io
3536  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3537  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3538  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3539  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
3540  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
3541  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
3542  */
3543 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3544 				enum page_type type, enum temp_type temp)
3545 {
3546 	switch (type) {
3547 	case DATA:
3548 		switch (temp) {
3549 		case WARM:
3550 			return WRITE_LIFE_NOT_SET;
3551 		case HOT:
3552 			return WRITE_LIFE_SHORT;
3553 		case COLD:
3554 			return WRITE_LIFE_EXTREME;
3555 		default:
3556 			return WRITE_LIFE_NONE;
3557 		}
3558 	case NODE:
3559 		switch (temp) {
3560 		case WARM:
3561 			return WRITE_LIFE_MEDIUM;
3562 		case HOT:
3563 			return WRITE_LIFE_NONE;
3564 		case COLD:
3565 			return WRITE_LIFE_LONG;
3566 		default:
3567 			return WRITE_LIFE_NONE;
3568 		}
3569 	case META:
3570 		return WRITE_LIFE_NONE;
3571 	default:
3572 		return WRITE_LIFE_NONE;
3573 	}
3574 }
3575 
3576 static int __get_segment_type_2(struct f2fs_io_info *fio)
3577 {
3578 	if (fio->type == DATA)
3579 		return CURSEG_HOT_DATA;
3580 	else
3581 		return CURSEG_HOT_NODE;
3582 }
3583 
3584 static int __get_segment_type_4(struct f2fs_io_info *fio)
3585 {
3586 	if (fio->type == DATA) {
3587 		struct inode *inode = fio->page->mapping->host;
3588 
3589 		if (S_ISDIR(inode->i_mode))
3590 			return CURSEG_HOT_DATA;
3591 		else
3592 			return CURSEG_COLD_DATA;
3593 	} else {
3594 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3595 			return CURSEG_WARM_NODE;
3596 		else
3597 			return CURSEG_COLD_NODE;
3598 	}
3599 }
3600 
3601 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
3602 {
3603 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3604 	struct extent_info ei = {};
3605 
3606 	if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
3607 		if (!ei.age)
3608 			return NO_CHECK_TYPE;
3609 		if (ei.age <= sbi->hot_data_age_threshold)
3610 			return CURSEG_HOT_DATA;
3611 		if (ei.age <= sbi->warm_data_age_threshold)
3612 			return CURSEG_WARM_DATA;
3613 		return CURSEG_COLD_DATA;
3614 	}
3615 	return NO_CHECK_TYPE;
3616 }
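
/*
 * Illustrative note (not part of this file): the lookup above classifies a
 * block by comparing its cached age against two ascending tunables,
 * hot_data_age_threshold <= warm_data_age_threshold. In sketch form, with
 * purely made-up numbers:
 *
 *	// hot_thresh = 262144, warm_thresh = 2621440 (illustrative values)
 *	if (ei.age <= hot_thresh)	// young data, rewritten often
 *		return CURSEG_HOT_DATA;
 *	if (ei.age <= warm_thresh)
 *		return CURSEG_WARM_DATA;
 *	return CURSEG_COLD_DATA;	// old, rarely-touched data
 *
 * An age of 0 means "no history", so the caller falls back to the other
 * heuristics in __get_segment_type_6().
 */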
3617 
3618 static int __get_segment_type_6(struct f2fs_io_info *fio)
3619 {
3620 	if (fio->type == DATA) {
3621 		struct inode *inode = fio->page->mapping->host;
3622 		int type;
3623 
3624 		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3625 			return CURSEG_COLD_DATA_PINNED;
3626 
3627 		if (page_private_gcing(fio->page)) {
3628 			if (fio->sbi->am.atgc_enabled &&
3629 				(fio->io_type == FS_DATA_IO) &&
3630 				(fio->sbi->gc_mode != GC_URGENT_HIGH) &&
3631 				__is_valid_data_blkaddr(fio->old_blkaddr) &&
3632 				!is_inode_flag_set(inode, FI_OPU_WRITE))
3633 				return CURSEG_ALL_DATA_ATGC;
3634 			else
3635 				return CURSEG_COLD_DATA;
3636 		}
3637 		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3638 			return CURSEG_COLD_DATA;
3639 
3640 		type = __get_age_segment_type(inode,
3641 				page_folio(fio->page)->index);
3642 		if (type != NO_CHECK_TYPE)
3643 			return type;
3644 
3645 		if (file_is_hot(inode) ||
3646 				is_inode_flag_set(inode, FI_HOT_DATA) ||
3647 				f2fs_is_cow_file(inode))
3648 			return CURSEG_HOT_DATA;
3649 		return f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
3650 						inode->i_write_hint);
3651 	} else {
3652 		if (IS_DNODE(fio->page))
3653 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3654 						CURSEG_HOT_NODE;
3655 		return CURSEG_COLD_NODE;
3656 	}
3657 }
3658 
3659 enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi,
3660 						enum log_type type)
3661 {
3662 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3663 	enum temp_type temp = COLD;
3664 
3665 	switch (curseg->seg_type) {
3666 	case CURSEG_HOT_NODE:
3667 	case CURSEG_HOT_DATA:
3668 		temp = HOT;
3669 		break;
3670 	case CURSEG_WARM_NODE:
3671 	case CURSEG_WARM_DATA:
3672 		temp = WARM;
3673 		break;
3674 	case CURSEG_COLD_NODE:
3675 	case CURSEG_COLD_DATA:
3676 		temp = COLD;
3677 		break;
3678 	default:
3679 		f2fs_bug_on(sbi, 1);
3680 	}
3681 
3682 	return temp;
3683 }
3684 
3685 static int __get_segment_type(struct f2fs_io_info *fio)
3686 {
3687 	enum log_type type = CURSEG_HOT_DATA;
3688 
3689 	switch (F2FS_OPTION(fio->sbi).active_logs) {
3690 	case 2:
3691 		type = __get_segment_type_2(fio);
3692 		break;
3693 	case 4:
3694 		type = __get_segment_type_4(fio);
3695 		break;
3696 	case 6:
3697 		type = __get_segment_type_6(fio);
3698 		break;
3699 	default:
3700 		f2fs_bug_on(fio->sbi, true);
3701 	}
3702 
3703 	fio->temp = f2fs_get_segment_temp(fio->sbi, type);
3704 
3705 	return type;
3706 }
3707 
3708 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
3709 		struct curseg_info *seg)
3710 {
3711 	/* To allocate block chunks of different sizes, use a random number */
3712 	if (--seg->fragment_remained_chunk > 0)
3713 		return;
3714 
3715 	seg->fragment_remained_chunk =
3716 		get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3717 	seg->next_blkoff +=
3718 		get_random_u32_inclusive(1, sbi->max_fragment_hole);
3719 }
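
/*
 * Illustrative sketch (not part of this file): in FS_MODE_FRAGMENT_BLK
 * mode the allocator writes a chunk of 1..max_fragment_chunk blocks and
 * then skips a hole of 1..max_fragment_hole blocks, deliberately
 * fragmenting the segment for testing. A userspace simulation of the
 * resulting layout (rand() standing in for get_random_u32_inclusive(),
 * mark_written() a hypothetical helper):
 *
 *	unsigned int blkoff = 0;
 *	int remaining = 0;
 *	for (int i = 0; i < nr_blocks; i++) {
 *		mark_written(blkoff++);
 *		if (--remaining <= 0) {
 *			remaining = 1 + rand() % max_chunk;	// next chunk
 *			blkoff += 1 + rand() % max_hole;	// skip a hole
 *		}
 *	}
 */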
3720 
3721 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3722 		block_t old_blkaddr, block_t *new_blkaddr,
3723 		struct f2fs_summary *sum, int type,
3724 		struct f2fs_io_info *fio)
3725 {
3726 	struct sit_info *sit_i = SIT_I(sbi);
3727 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3728 	unsigned long long old_mtime;
3729 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3730 	struct seg_entry *se = NULL;
3731 	bool segment_full = false;
3732 	int ret = 0;
3733 
3734 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3735 
3736 	mutex_lock(&curseg->curseg_mutex);
3737 	down_write(&sit_i->sentry_lock);
3738 
3739 	if (curseg->segno == NULL_SEGNO) {
3740 		ret = -ENOSPC;
3741 		goto out_err;
3742 	}
3743 
3744 	if (from_gc) {
3745 		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3746 		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3747 		sanity_check_seg_type(sbi, se->type);
3748 		f2fs_bug_on(sbi, IS_NODESEG(se->type));
3749 	}
3750 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3751 
3752 	f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
3753 
3754 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3755 
3756 	curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
3757 	if (curseg->alloc_type == SSR) {
3758 		curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
3759 	} else {
3760 		curseg->next_blkoff++;
3761 		if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3762 			f2fs_randomize_chunk(sbi, curseg);
3763 	}
3764 	if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
3765 		segment_full = true;
3766 	stat_inc_block_count(sbi, curseg);
3767 
3768 	if (from_gc) {
3769 		old_mtime = get_segment_mtime(sbi, old_blkaddr);
3770 	} else {
3771 		update_segment_mtime(sbi, old_blkaddr, 0);
3772 		old_mtime = 0;
3773 	}
3774 	update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3775 
3776 	/*
3777 	 * SIT information should be updated before segment allocation,
3778 	 * since SSR needs the latest valid block information.
3779 	 */
3780 	update_sit_entry(sbi, *new_blkaddr, 1);
3781 	update_sit_entry(sbi, old_blkaddr, -1);
3782 
3783 	/*
3784 	 * If the current segment is full, flush it out and replace it with a
3785 	 * new segment.
3786 	 */
3787 	if (segment_full) {
3788 		if (type == CURSEG_COLD_DATA_PINNED &&
3789 		    !((curseg->segno + 1) % sbi->segs_per_sec)) {
3790 			write_sum_page(sbi, curseg->sum_blk,
3791 					GET_SUM_BLOCK(sbi, curseg->segno));
3792 			reset_curseg_fields(curseg);
3793 			goto skip_new_segment;
3794 		}
3795 
3796 		if (from_gc) {
3797 			ret = get_atssr_segment(sbi, type, se->type,
3798 						AT_SSR, se->mtime);
3799 		} else {
3800 			if (need_new_seg(sbi, type))
3801 				ret = new_curseg(sbi, type, false);
3802 			else
3803 				ret = change_curseg(sbi, type);
3804 			stat_inc_seg_type(sbi, curseg);
3805 		}
3806 
3807 		if (ret)
3808 			goto out_err;
3809 	}
3810 
3811 skip_new_segment:
3812 	/*
3813 	 * segment dirty status should be updated after segment allocation,
3814 	 * so we only need to update the status once, after the previous
3815 	 * segment has been closed.
3816 	 */
3817 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3818 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3819 
3820 	if (IS_DATASEG(curseg->seg_type))
3821 		atomic64_inc(&sbi->allocated_data_blocks);
3822 
3823 	up_write(&sit_i->sentry_lock);
3824 
3825 	if (page && IS_NODESEG(curseg->seg_type)) {
3826 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3827 
3828 		f2fs_inode_chksum_set(sbi, page);
3829 	}
3830 
3831 	if (fio) {
3832 		struct f2fs_bio_info *io;
3833 
3834 		INIT_LIST_HEAD(&fio->list);
3835 		fio->in_list = 1;
3836 		io = sbi->write_io[fio->type] + fio->temp;
3837 		spin_lock(&io->io_lock);
3838 		list_add_tail(&fio->list, &io->io_list);
3839 		spin_unlock(&io->io_lock);
3840 	}
3841 
3842 	mutex_unlock(&curseg->curseg_mutex);
3843 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3844 	return 0;
3845 
3846 out_err:
3847 	*new_blkaddr = NULL_ADDR;
3848 	up_write(&sit_i->sentry_lock);
3849 	mutex_unlock(&curseg->curseg_mutex);
3850 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3851 	return ret;
3852 }
3853 
3854 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3855 					block_t blkaddr, unsigned int blkcnt)
3856 {
3857 	if (!f2fs_is_multi_device(sbi))
3858 		return;
3859 
3860 	while (1) {
3861 		unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3862 		unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3863 
3864 		/* update device state for fsync */
3865 		f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3866 
3867 		/* update device state for checkpoint */
3868 		if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3869 			spin_lock(&sbi->dev_lock);
3870 			f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3871 			spin_unlock(&sbi->dev_lock);
3872 		}
3873 
3874 		if (blkcnt <= blks)
3875 			break;
3876 		blkcnt -= blks;
3877 		blkaddr += blks;
3878 	}
3879 }
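
/*
 * Worked example (illustrative, not part of this file): the loop above
 * walks an extent that may straddle device boundaries. Assuming two
 * devices covering blocks [0, 999] and [1000, 1999], a call with
 * blkaddr = 990 and blkcnt = 20 dirties both devices:
 *
 *	pass 1: devidx = 0, blks = 999 - 990 + 1 = 10; 20 > 10, so continue
 *	pass 2: blkaddr = 1000, blkcnt = 10, devidx = 1; 10 <= blks, done
 */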
3880 
3881 static int log_type_to_seg_type(enum log_type type)
3882 {
3883 	int seg_type = CURSEG_COLD_DATA;
3884 
3885 	switch (type) {
3886 	case CURSEG_HOT_DATA:
3887 	case CURSEG_WARM_DATA:
3888 	case CURSEG_COLD_DATA:
3889 	case CURSEG_HOT_NODE:
3890 	case CURSEG_WARM_NODE:
3891 	case CURSEG_COLD_NODE:
3892 		seg_type = (int)type;
3893 		break;
3894 	case CURSEG_COLD_DATA_PINNED:
3895 	case CURSEG_ALL_DATA_ATGC:
3896 		seg_type = CURSEG_COLD_DATA;
3897 		break;
3898 	default:
3899 		break;
3900 	}
3901 	return seg_type;
3902 }
3903 
3904 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3905 {
3906 	struct folio *folio = page_folio(fio->page);
3907 	enum log_type type = __get_segment_type(fio);
3908 	int seg_type = log_type_to_seg_type(type);
3909 	bool keep_order = (f2fs_lfs_mode(fio->sbi) &&
3910 				seg_type == CURSEG_COLD_DATA);
3911 
3912 	if (keep_order)
3913 		f2fs_down_read(&fio->sbi->io_order_lock);
3914 
3915 	if (f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3916 			&fio->new_blkaddr, sum, type, fio)) {
3917 		if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host))
3918 			fscrypt_finalize_bounce_page(&fio->encrypted_page);
3919 		folio_end_writeback(folio);
3920 		if (f2fs_in_warm_node_list(fio->sbi, folio))
3921 			f2fs_del_fsync_node_entry(fio->sbi, fio->page);
3922 		goto out;
3923 	}
3924 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3925 		f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1);
3926 
3927 	/* write out the dirty page to the bdev */
3928 	f2fs_submit_page_write(fio);
3929 
3930 	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3931 out:
3932 	if (keep_order)
3933 		f2fs_up_read(&fio->sbi->io_order_lock);
3934 }
3935 
3936 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
3937 					enum iostat_type io_type)
3938 {
3939 	struct f2fs_io_info fio = {
3940 		.sbi = sbi,
3941 		.type = META,
3942 		.temp = HOT,
3943 		.op = REQ_OP_WRITE,
3944 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3945 		.old_blkaddr = folio->index,
3946 		.new_blkaddr = folio->index,
3947 		.page = folio_page(folio, 0),
3948 		.encrypted_page = NULL,
3949 		.in_list = 0,
3950 	};
3951 
3952 	if (unlikely(folio->index >= MAIN_BLKADDR(sbi)))
3953 		fio.op_flags &= ~REQ_META;
3954 
3955 	folio_start_writeback(folio);
3956 	f2fs_submit_page_write(&fio);
3957 
3958 	stat_inc_meta_count(sbi, folio->index);
3959 	f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
3960 }
3961 
3962 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3963 {
3964 	struct f2fs_summary sum;
3965 
3966 	set_summary(&sum, nid, 0, 0);
3967 	do_write_page(&sum, fio);
3968 
3969 	f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
3970 }
3971 
3972 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3973 					struct f2fs_io_info *fio)
3974 {
3975 	struct f2fs_sb_info *sbi = fio->sbi;
3976 	struct f2fs_summary sum;
3977 
3978 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3979 	if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO)
3980 		f2fs_update_age_extent_cache(dn);
3981 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3982 	do_write_page(&sum, fio);
3983 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3984 
3985 	f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
3986 }
3987 
3988 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3989 {
3990 	int err;
3991 	struct f2fs_sb_info *sbi = fio->sbi;
3992 	unsigned int segno;
3993 
3994 	fio->new_blkaddr = fio->old_blkaddr;
3995 	/* i/o temperature is needed for passing down write hints */
3996 	__get_segment_type(fio);
3997 
3998 	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3999 
4000 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
4001 		set_sbi_flag(sbi, SBI_NEED_FSCK);
4002 		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
4003 			  __func__, segno);
4004 		err = -EFSCORRUPTED;
4005 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
4006 		goto drop_bio;
4007 	}
4008 
4009 	if (f2fs_cp_error(sbi)) {
4010 		err = -EIO;
4011 		goto drop_bio;
4012 	}
4013 
4014 	if (fio->meta_gc)
4015 		f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
4016 
4017 	stat_inc_inplace_blocks(fio->sbi);
4018 
4019 	if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
4020 		err = f2fs_merge_page_bio(fio);
4021 	else
4022 		err = f2fs_submit_page_bio(fio);
4023 	if (!err) {
4024 		f2fs_update_device_state(fio->sbi, fio->ino,
4025 						fio->new_blkaddr, 1);
4026 		f2fs_update_iostat(fio->sbi, fio->page->mapping->host,
4027 						fio->io_type, F2FS_BLKSIZE);
4028 	}
4029 
4030 	return err;
4031 drop_bio:
4032 	if (fio->bio && *(fio->bio)) {
4033 		struct bio *bio = *(fio->bio);
4034 
4035 		bio->bi_status = BLK_STS_IOERR;
4036 		bio_endio(bio);
4037 		*(fio->bio) = NULL;
4038 	}
4039 	return err;
4040 }
4041 
4042 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
4043 						unsigned int segno)
4044 {
4045 	int i;
4046 
4047 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
4048 		if (CURSEG_I(sbi, i)->segno == segno)
4049 			break;
4050 	}
4051 	return i;
4052 }
4053 
4054 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
4055 				block_t old_blkaddr, block_t new_blkaddr,
4056 				bool recover_curseg, bool recover_newaddr,
4057 				bool from_gc)
4058 {
4059 	struct sit_info *sit_i = SIT_I(sbi);
4060 	struct curseg_info *curseg;
4061 	unsigned int segno, old_cursegno;
4062 	struct seg_entry *se;
4063 	int type;
4064 	unsigned short old_blkoff;
4065 	unsigned char old_alloc_type;
4066 
4067 	segno = GET_SEGNO(sbi, new_blkaddr);
4068 	se = get_seg_entry(sbi, segno);
4069 	type = se->type;
4070 
4071 	f2fs_down_write(&SM_I(sbi)->curseg_lock);
4072 
4073 	if (!recover_curseg) {
4074 		/* for recovery flow */
4075 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
4076 			if (old_blkaddr == NULL_ADDR)
4077 				type = CURSEG_COLD_DATA;
4078 			else
4079 				type = CURSEG_WARM_DATA;
4080 		}
4081 	} else {
4082 		if (IS_CURSEG(sbi, segno)) {
4083 			/* se->type is volatile due to SSR allocation */
4084 			type = __f2fs_get_curseg(sbi, segno);
4085 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
4086 		} else {
4087 			type = CURSEG_WARM_DATA;
4088 		}
4089 	}
4090 
4091 	curseg = CURSEG_I(sbi, type);
4092 	f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type));
4093 
4094 	mutex_lock(&curseg->curseg_mutex);
4095 	down_write(&sit_i->sentry_lock);
4096 
4097 	old_cursegno = curseg->segno;
4098 	old_blkoff = curseg->next_blkoff;
4099 	old_alloc_type = curseg->alloc_type;
4100 
4101 	/* change the current segment */
4102 	if (segno != curseg->segno) {
4103 		curseg->next_segno = segno;
4104 		if (change_curseg(sbi, type))
4105 			goto out_unlock;
4106 	}
4107 
4108 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
4109 	curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
4110 
4111 	if (!recover_curseg || recover_newaddr) {
4112 		if (!from_gc)
4113 			update_segment_mtime(sbi, new_blkaddr, 0);
4114 		update_sit_entry(sbi, new_blkaddr, 1);
4115 	}
4116 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
4117 		f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
4118 		if (!from_gc)
4119 			update_segment_mtime(sbi, old_blkaddr, 0);
4120 		update_sit_entry(sbi, old_blkaddr, -1);
4121 	}
4122 
4123 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
4124 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
4125 
4126 	locate_dirty_segment(sbi, old_cursegno);
4127 
4128 	if (recover_curseg) {
4129 		if (old_cursegno != curseg->segno) {
4130 			curseg->next_segno = old_cursegno;
4131 			if (change_curseg(sbi, type))
4132 				goto out_unlock;
4133 		}
4134 		curseg->next_blkoff = old_blkoff;
4135 		curseg->alloc_type = old_alloc_type;
4136 	}
4137 
4138 out_unlock:
4139 	up_write(&sit_i->sentry_lock);
4140 	mutex_unlock(&curseg->curseg_mutex);
4141 	f2fs_up_write(&SM_I(sbi)->curseg_lock);
4142 }
4143 
4144 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
4145 				block_t old_addr, block_t new_addr,
4146 				unsigned char version, bool recover_curseg,
4147 				bool recover_newaddr)
4148 {
4149 	struct f2fs_summary sum;
4150 
4151 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
4152 
4153 	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
4154 					recover_curseg, recover_newaddr, false);
4155 
4156 	f2fs_update_data_blkaddr(dn, new_addr);
4157 }
4158 
4159 void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
4160 		bool ordered, bool locked)
4161 {
4162 	if (folio_test_writeback(folio)) {
4163 		struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
4164 
4165 		/* submit cached LFS IO */
4166 		f2fs_submit_merged_write_cond(sbi, NULL, &folio->page, 0, type);
4167 		/* submit cached IPU IO */
4168 		f2fs_submit_merged_ipu_write(sbi, NULL, &folio->page);
4169 		if (ordered) {
4170 			folio_wait_writeback(folio);
4171 			f2fs_bug_on(sbi, locked && folio_test_writeback(folio));
4172 		} else {
4173 			folio_wait_stable(folio);
4174 		}
4175 	}
4176 }
4177 
4178 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
4179 {
4180 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4181 	struct page *cpage;
4182 
4183 	if (!f2fs_meta_inode_gc_required(inode))
4184 		return;
4185 
4186 	if (!__is_valid_data_blkaddr(blkaddr))
4187 		return;
4188 
4189 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
4190 	if (cpage) {
4191 		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
4192 		f2fs_put_page(cpage, 1);
4193 	}
4194 }
4195 
4196 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
4197 								block_t len)
4198 {
4199 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4200 	block_t i;
4201 
4202 	if (!f2fs_meta_inode_gc_required(inode))
4203 		return;
4204 
4205 	for (i = 0; i < len; i++)
4206 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
4207 
4208 	f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
4209 }
4210 
4211 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
4212 {
4213 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4214 	struct curseg_info *seg_i;
4215 	unsigned char *kaddr;
4216 	struct page *page;
4217 	block_t start;
4218 	int i, j, offset;
4219 
4220 	start = start_sum_block(sbi);
4221 
4222 	page = f2fs_get_meta_page(sbi, start++);
4223 	if (IS_ERR(page))
4224 		return PTR_ERR(page);
4225 	kaddr = (unsigned char *)page_address(page);
4226 
4227 	/* Step 1: restore nat cache */
4228 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4229 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
4230 
4231 	/* Step 2: restore sit cache */
4232 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4233 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
4234 	offset = 2 * SUM_JOURNAL_SIZE;
4235 
4236 	/* Step 3: restore summary entries */
4237 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4238 		unsigned short blk_off;
4239 		unsigned int segno;
4240 
4241 		seg_i = CURSEG_I(sbi, i);
4242 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
4243 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
4244 		seg_i->next_segno = segno;
4245 		reset_curseg(sbi, i, 0);
4246 		seg_i->alloc_type = ckpt->alloc_type[i];
4247 		seg_i->next_blkoff = blk_off;
4248 
4249 		if (seg_i->alloc_type == SSR)
4250 			blk_off = BLKS_PER_SEG(sbi);
4251 
4252 		for (j = 0; j < blk_off; j++) {
4253 			struct f2fs_summary *s;
4254 
4255 			s = (struct f2fs_summary *)(kaddr + offset);
4256 			seg_i->sum_blk->entries[j] = *s;
4257 			offset += SUMMARY_SIZE;
4258 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
4259 						SUM_FOOTER_SIZE)
4260 				continue;
4261 
4262 			f2fs_put_page(page, 1);
4263 			page = NULL;
4264 
4265 			page = f2fs_get_meta_page(sbi, start++);
4266 			if (IS_ERR(page))
4267 				return PTR_ERR(page);
4268 			kaddr = (unsigned char *)page_address(page);
4269 			offset = 0;
4270 		}
4271 	}
4272 	f2fs_put_page(page, 1);
4273 	return 0;
4274 }
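
/*
 * Worked example (illustrative, not part of this file): in the compacted
 * layout the first summary block carries the NAT journal, the SIT journal
 * and then tightly packed summary entries; follow-on blocks carry entries
 * only. The first page therefore holds on the order of
 *
 *	(PAGE_SIZE - SUM_FOOTER_SIZE - 2 * SUM_JOURNAL_SIZE) / SUMMARY_SIZE
 *
 * entries, which is why the loop above fetches a fresh meta page whenever
 * offset + SUMMARY_SIZE would cross PAGE_SIZE - SUM_FOOTER_SIZE.
 */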
4275 
4276 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
4277 {
4278 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4279 	struct f2fs_summary_block *sum;
4280 	struct curseg_info *curseg;
4281 	struct page *new;
4282 	unsigned short blk_off;
4283 	unsigned int segno = 0;
4284 	block_t blk_addr = 0;
4285 	int err = 0;
4286 
4287 	/* get segment number and block addr */
4288 	if (IS_DATASEG(type)) {
4289 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
4290 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
4291 							CURSEG_HOT_DATA]);
4292 		if (__exist_node_summaries(sbi))
4293 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
4294 		else
4295 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
4296 	} else {
4297 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
4298 							CURSEG_HOT_NODE]);
4299 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
4300 							CURSEG_HOT_NODE]);
4301 		if (__exist_node_summaries(sbi))
4302 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
4303 							type - CURSEG_HOT_NODE);
4304 		else
4305 			blk_addr = GET_SUM_BLOCK(sbi, segno);
4306 	}
4307 
4308 	new = f2fs_get_meta_page(sbi, blk_addr);
4309 	if (IS_ERR(new))
4310 		return PTR_ERR(new);
4311 	sum = (struct f2fs_summary_block *)page_address(new);
4312 
4313 	if (IS_NODESEG(type)) {
4314 		if (__exist_node_summaries(sbi)) {
4315 			struct f2fs_summary *ns = &sum->entries[0];
4316 			int i;
4317 
4318 			for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
4319 				ns->version = 0;
4320 				ns->ofs_in_node = 0;
4321 			}
4322 		} else {
4323 			err = f2fs_restore_node_summary(sbi, segno, sum);
4324 			if (err)
4325 				goto out;
4326 		}
4327 	}
4328 
4329 	/* set the uncompleted segment as the curseg */
4330 	curseg = CURSEG_I(sbi, type);
4331 	mutex_lock(&curseg->curseg_mutex);
4332 
4333 	/* update journal info */
4334 	down_write(&curseg->journal_rwsem);
4335 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
4336 	up_write(&curseg->journal_rwsem);
4337 
4338 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
4339 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
4340 	curseg->next_segno = segno;
4341 	reset_curseg(sbi, type, 0);
4342 	curseg->alloc_type = ckpt->alloc_type[type];
4343 	curseg->next_blkoff = blk_off;
4344 	mutex_unlock(&curseg->curseg_mutex);
4345 out:
4346 	f2fs_put_page(new, 1);
4347 	return err;
4348 }
4349 
4350 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
4351 {
4352 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
4353 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
4354 	int type = CURSEG_HOT_DATA;
4355 	int err;
4356 
4357 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
4358 		int npages = f2fs_npages_for_summary_flush(sbi, true);
4359 
4360 		if (npages >= 2)
4361 			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
4362 							META_CP, true);
4363 
4364 		/* restore for compacted data summary */
4365 		err = read_compacted_summaries(sbi);
4366 		if (err)
4367 			return err;
4368 		type = CURSEG_HOT_NODE;
4369 	}
4370 
4371 	if (__exist_node_summaries(sbi))
4372 		f2fs_ra_meta_pages(sbi,
4373 				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4374 				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
4375 
4376 	for (; type <= CURSEG_COLD_NODE; type++) {
4377 		err = read_normal_summaries(sbi, type);
4378 		if (err)
4379 			return err;
4380 	}
4381 
4382 	/* sanity check for summary blocks */
4383 	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
4384 			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
4385 		f2fs_err(sbi, "invalid journal entries nats %u sits %u",
4386 			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
4387 		return -EINVAL;
4388 	}
4389 
4390 	return 0;
4391 }
4392 
4393 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4394 {
4395 	struct page *page;
4396 	unsigned char *kaddr;
4397 	struct f2fs_summary *summary;
4398 	struct curseg_info *seg_i;
4399 	int written_size = 0;
4400 	int i, j;
4401 
4402 	page = f2fs_grab_meta_page(sbi, blkaddr++);
4403 	kaddr = (unsigned char *)page_address(page);
4404 	memset(kaddr, 0, PAGE_SIZE);
4405 
4406 	/* Step 1: write nat cache */
4407 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4408 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
4409 	written_size += SUM_JOURNAL_SIZE;
4410 
4411 	/* Step 2: write sit cache */
4412 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4413 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
4414 	written_size += SUM_JOURNAL_SIZE;
4415 
4416 	/* Step 3: write summary entries */
4417 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4418 		seg_i = CURSEG_I(sbi, i);
4419 		for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
4420 			if (!page) {
4421 				page = f2fs_grab_meta_page(sbi, blkaddr++);
4422 				kaddr = (unsigned char *)page_address(page);
4423 				memset(kaddr, 0, PAGE_SIZE);
4424 				written_size = 0;
4425 			}
4426 			summary = (struct f2fs_summary *)(kaddr + written_size);
4427 			*summary = seg_i->sum_blk->entries[j];
4428 			written_size += SUMMARY_SIZE;
4429 
4430 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
4431 							SUM_FOOTER_SIZE)
4432 				continue;
4433 
4434 			set_page_dirty(page);
4435 			f2fs_put_page(page, 1);
4436 			page = NULL;
4437 		}
4438 	}
4439 	if (page) {
4440 		set_page_dirty(page);
4441 		f2fs_put_page(page, 1);
4442 	}
4443 }
4444 
4445 static void write_normal_summaries(struct f2fs_sb_info *sbi,
4446 					block_t blkaddr, int type)
4447 {
4448 	int i, end;
4449 
4450 	if (IS_DATASEG(type))
4451 		end = type + NR_CURSEG_DATA_TYPE;
4452 	else
4453 		end = type + NR_CURSEG_NODE_TYPE;
4454 
4455 	for (i = type; i < end; i++)
4456 		write_current_sum_page(sbi, i, blkaddr + (i - type));
4457 }
4458 
4459 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4460 {
4461 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4462 		write_compacted_summaries(sbi, start_blk);
4463 	else
4464 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4465 }
4466 
4467 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4468 {
4469 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4470 }
4471 
4472 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
4473 					unsigned int val, int alloc)
4474 {
4475 	int i;
4476 
4477 	if (type == NAT_JOURNAL) {
4478 		for (i = 0; i < nats_in_cursum(journal); i++) {
4479 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4480 				return i;
4481 		}
4482 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
4483 			return update_nats_in_cursum(journal, 1);
4484 	} else if (type == SIT_JOURNAL) {
4485 		for (i = 0; i < sits_in_cursum(journal); i++)
4486 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4487 				return i;
4488 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
4489 			return update_sits_in_cursum(journal, 1);
4490 	}
4491 	return -1;
4492 }
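
/*
 * Usage sketch (illustrative, not part of this file): the return value is
 * a slot index into the journal, or -1 when the key is absent and no new
 * slot can be allocated. Typical caller pattern, as in
 * f2fs_flush_sit_entries() below:
 *
 *	int offset = f2fs_lookup_journal_in_cursum(journal, SIT_JOURNAL,
 *						   segno, 1);	// 1 = alloc
 *	if (offset >= 0)
 *		segno_in_journal(journal, offset) = cpu_to_le32(segno);
 */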
4493 
4494 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4495 					unsigned int segno)
4496 {
4497 	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4498 }
4499 
4500 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4501 					unsigned int start)
4502 {
4503 	struct sit_info *sit_i = SIT_I(sbi);
4504 	struct page *page;
4505 	pgoff_t src_off, dst_off;
4506 
4507 	src_off = current_sit_addr(sbi, start);
4508 	dst_off = next_sit_addr(sbi, src_off);
4509 
4510 	page = f2fs_grab_meta_page(sbi, dst_off);
4511 	seg_info_to_sit_page(sbi, page, start);
4512 
4513 	set_page_dirty(page);
4514 	set_to_next_sit(sit_i, start);
4515 
4516 	return page;
4517 }
4518 
4519 static struct sit_entry_set *grab_sit_entry_set(void)
4520 {
4521 	struct sit_entry_set *ses =
4522 			f2fs_kmem_cache_alloc(sit_entry_set_slab,
4523 						GFP_NOFS, true, NULL);
4524 
4525 	ses->entry_cnt = 0;
4526 	INIT_LIST_HEAD(&ses->set_list);
4527 	return ses;
4528 }
4529 
4530 static void release_sit_entry_set(struct sit_entry_set *ses)
4531 {
4532 	list_del(&ses->set_list);
4533 	kmem_cache_free(sit_entry_set_slab, ses);
4534 }
4535 
4536 static void adjust_sit_entry_set(struct sit_entry_set *ses,
4537 						struct list_head *head)
4538 {
4539 	struct sit_entry_set *next = ses;
4540 
4541 	if (list_is_last(&ses->set_list, head))
4542 		return;
4543 
4544 	list_for_each_entry_continue(next, head, set_list)
4545 		if (ses->entry_cnt <= next->entry_cnt) {
4546 			list_move_tail(&ses->set_list, &next->set_list);
4547 			return;
4548 		}
4549 
4550 	list_move_tail(&ses->set_list, head);
4551 }
4552 
4553 static void add_sit_entry(unsigned int segno, struct list_head *head)
4554 {
4555 	struct sit_entry_set *ses;
4556 	unsigned int start_segno = START_SEGNO(segno);
4557 
4558 	list_for_each_entry(ses, head, set_list) {
4559 		if (ses->start_segno == start_segno) {
4560 			ses->entry_cnt++;
4561 			adjust_sit_entry_set(ses, head);
4562 			return;
4563 		}
4564 	}
4565 
4566 	ses = grab_sit_entry_set();
4567 
4568 	ses->start_segno = start_segno;
4569 	ses->entry_cnt++;
4570 	list_add(&ses->set_list, head);
4571 }
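
/*
 * Illustrative note (not part of this file): the set list is kept sorted
 * by ascending entry_cnt, so the checkpoint flush can drain the smallest
 * sets into the journal first. After a count is bumped,
 * adjust_sit_entry_set() re-inserts the set just before the first later
 * set whose count is greater than or equal to the new one:
 *
 *	counts [2, 2, 3], first set bumped to 3:
 *	the scan passes the remaining 2 and stops at 3 => [2, 3*, 3]
 */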
4572 
4573 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4574 {
4575 	struct f2fs_sm_info *sm_info = SM_I(sbi);
4576 	struct list_head *set_list = &sm_info->sit_entry_set;
4577 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4578 	unsigned int segno;
4579 
4580 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4581 		add_sit_entry(segno, set_list);
4582 }
4583 
4584 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4585 {
4586 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4587 	struct f2fs_journal *journal = curseg->journal;
4588 	int i;
4589 
4590 	down_write(&curseg->journal_rwsem);
4591 	for (i = 0; i < sits_in_cursum(journal); i++) {
4592 		unsigned int segno;
4593 		bool dirtied;
4594 
4595 		segno = le32_to_cpu(segno_in_journal(journal, i));
4596 		dirtied = __mark_sit_entry_dirty(sbi, segno);
4597 
4598 		if (!dirtied)
4599 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4600 	}
4601 	update_sits_in_cursum(journal, -i);
4602 	up_write(&curseg->journal_rwsem);
4603 }
4604 
4605 /*
4606  * CP calls this function, which flushes SIT entries including sit_journal,
4607  * and moves prefree segs to free segs.
4608  */
4609 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4610 {
4611 	struct sit_info *sit_i = SIT_I(sbi);
4612 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4613 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4614 	struct f2fs_journal *journal = curseg->journal;
4615 	struct sit_entry_set *ses, *tmp;
4616 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
4617 	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4618 	struct seg_entry *se;
4619 
4620 	down_write(&sit_i->sentry_lock);
4621 
4622 	if (!sit_i->dirty_sentries)
4623 		goto out;
4624 
4625 	/*
4626 	 * temporarily add and account the sit entries of the dirty bitmap
4627 	 * in the sit entry set
4628 	 */
4629 	add_sits_in_set(sbi);
4630 
4631 	/*
4632 	 * if there is not enough space in the journal to store the dirty sit
4633 	 * entries, remove all entries from the journal, then add and account
4634 	 * them in the sit entry set instead.
4635 	 */
4636 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4637 								!to_journal)
4638 		remove_sits_in_journal(sbi);
4639 
4640 	/*
4641 	 * there are two steps to flush sit entries:
4642 	 * #1, flush sit entries to journal in current cold data summary block.
4643 	 * #2, flush sit entries to sit page.
4644 	 */
4645 	list_for_each_entry_safe(ses, tmp, head, set_list) {
4646 		struct page *page = NULL;
4647 		struct f2fs_sit_block *raw_sit = NULL;
4648 		unsigned int start_segno = ses->start_segno;
4649 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4650 						(unsigned long)MAIN_SEGS(sbi));
4651 		unsigned int segno = start_segno;
4652 
4653 		if (to_journal &&
4654 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4655 			to_journal = false;
4656 
4657 		if (to_journal) {
4658 			down_write(&curseg->journal_rwsem);
4659 		} else {
4660 			page = get_next_sit_page(sbi, start_segno);
4661 			raw_sit = page_address(page);
4662 		}
4663 
4664 		/* flush dirty sit entries in region of current sit set */
4665 		for_each_set_bit_from(segno, bitmap, end) {
4666 			int offset, sit_offset;
4667 
4668 			se = get_seg_entry(sbi, segno);
4669 #ifdef CONFIG_F2FS_CHECK_FS
4670 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4671 						SIT_VBLOCK_MAP_SIZE))
4672 				f2fs_bug_on(sbi, 1);
4673 #endif
4674 
4675 			/* add discard candidates */
4676 			if (!(cpc->reason & CP_DISCARD)) {
4677 				cpc->trim_start = segno;
4678 				add_discard_addrs(sbi, cpc, false);
4679 			}
4680 
4681 			if (to_journal) {
4682 				offset = f2fs_lookup_journal_in_cursum(journal,
4683 							SIT_JOURNAL, segno, 1);
4684 				f2fs_bug_on(sbi, offset < 0);
4685 				segno_in_journal(journal, offset) =
4686 							cpu_to_le32(segno);
4687 				seg_info_to_raw_sit(se,
4688 					&sit_in_journal(journal, offset));
4689 				check_block_count(sbi, segno,
4690 					&sit_in_journal(journal, offset));
4691 			} else {
4692 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4693 				seg_info_to_raw_sit(se,
4694 						&raw_sit->entries[sit_offset]);
4695 				check_block_count(sbi, segno,
4696 						&raw_sit->entries[sit_offset]);
4697 			}
4698 
4699 			__clear_bit(segno, bitmap);
4700 			sit_i->dirty_sentries--;
4701 			ses->entry_cnt--;
4702 		}
4703 
4704 		if (to_journal)
4705 			up_write(&curseg->journal_rwsem);
4706 		else
4707 			f2fs_put_page(page, 1);
4708 
4709 		f2fs_bug_on(sbi, ses->entry_cnt);
4710 		release_sit_entry_set(ses);
4711 	}
4712 
4713 	f2fs_bug_on(sbi, !list_empty(head));
4714 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
4715 out:
4716 	if (cpc->reason & CP_DISCARD) {
4717 		__u64 trim_start = cpc->trim_start;
4718 
4719 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4720 			add_discard_addrs(sbi, cpc, false);
4721 
4722 		cpc->trim_start = trim_start;
4723 	}
4724 	up_write(&sit_i->sentry_lock);
4725 
4726 	set_prefree_as_free_segments(sbi);
4727 }
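
/*
 * Decision sketch (illustrative, not part of this file): the flush above
 * prefers the in-summary SIT journal and falls back to rewriting whole
 * SIT pages once a set no longer fits (or when resizing); note that
 * to_journal is sticky, so all later sets also take the page path:
 *
 *	if (to_journal &&
 *	    !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
 *		to_journal = false;	// from here on, page path only
 */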
4728 
4729 static int build_sit_info(struct f2fs_sb_info *sbi)
4730 {
4731 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4732 	struct sit_info *sit_i;
4733 	unsigned int sit_segs, start;
4734 	char *src_bitmap, *bitmap;
4735 	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4736 	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4737 
4738 	/* allocate memory for SIT information */
4739 	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4740 	if (!sit_i)
4741 		return -ENOMEM;
4742 
4743 	SM_I(sbi)->sit_info = sit_i;
4744 
4745 	sit_i->sentries =
4746 		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4747 					      MAIN_SEGS(sbi)),
4748 			      GFP_KERNEL);
4749 	if (!sit_i->sentries)
4750 		return -ENOMEM;
4751 
4752 	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4753 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4754 								GFP_KERNEL);
4755 	if (!sit_i->dirty_sentries_bitmap)
4756 		return -ENOMEM;
4757 
4758 #ifdef CONFIG_F2FS_CHECK_FS
4759 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4760 #else
4761 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4762 #endif
4763 	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4764 	if (!sit_i->bitmap)
4765 		return -ENOMEM;
4766 
4767 	bitmap = sit_i->bitmap;
4768 
4769 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4770 		sit_i->sentries[start].cur_valid_map = bitmap;
4771 		bitmap += SIT_VBLOCK_MAP_SIZE;
4772 
4773 		sit_i->sentries[start].ckpt_valid_map = bitmap;
4774 		bitmap += SIT_VBLOCK_MAP_SIZE;
4775 
4776 #ifdef CONFIG_F2FS_CHECK_FS
4777 		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4778 		bitmap += SIT_VBLOCK_MAP_SIZE;
4779 #endif
4780 
4781 		if (discard_map) {
4782 			sit_i->sentries[start].discard_map = bitmap;
4783 			bitmap += SIT_VBLOCK_MAP_SIZE;
4784 		}
4785 	}
4786 
4787 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4788 	if (!sit_i->tmp_map)
4789 		return -ENOMEM;
4790 
4791 	if (__is_large_section(sbi)) {
4792 		sit_i->sec_entries =
4793 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4794 						      MAIN_SECS(sbi)),
4795 				      GFP_KERNEL);
4796 		if (!sit_i->sec_entries)
4797 			return -ENOMEM;
4798 	}
4799 
4800 	/* get information related to the SIT */
4801 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4802 
4803 	/* set up the SIT bitmap from the checkpoint pack */
4804 	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4805 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4806 
4807 	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4808 	if (!sit_i->sit_bitmap)
4809 		return -ENOMEM;
4810 
4811 #ifdef CONFIG_F2FS_CHECK_FS
4812 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4813 					sit_bitmap_size, GFP_KERNEL);
4814 	if (!sit_i->sit_bitmap_mir)
4815 		return -ENOMEM;
4816 
4817 	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4818 					main_bitmap_size, GFP_KERNEL);
4819 	if (!sit_i->invalid_segmap)
4820 		return -ENOMEM;
4821 #endif
4822 
4823 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4824 	sit_i->sit_blocks = SEGS_TO_BLKS(sbi, sit_segs);
4825 	sit_i->written_valid_blocks = 0;
4826 	sit_i->bitmap_size = sit_bitmap_size;
4827 	sit_i->dirty_sentries = 0;
4828 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4829 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4830 	sit_i->mounted_time = ktime_get_boottime_seconds();
4831 	init_rwsem(&sit_i->sentry_lock);
4832 	return 0;
4833 }
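
/*
 * Worked example (illustrative, not part of this file): the single
 * kvzalloc above packs the per-segment validity maps back to back.
 * Without CONFIG_F2FS_CHECK_FS and with block-unit discard enabled, each
 * segment consumes 3 * SIT_VBLOCK_MAP_SIZE bytes (cur_valid_map,
 * ckpt_valid_map, discard_map). Assuming the usual 64-byte map (512
 * blocks per segment / 8) and 10000 main segments:
 *
 *	bitmap_size = 10000 * 64 * (2 + 1) = 1,920,000 bytes
 *
 * The loop above then carves the per-segment pointers out of this one
 * region, in the same order.
 */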
4834 
4835 static int build_free_segmap(struct f2fs_sb_info *sbi)
4836 {
4837 	struct free_segmap_info *free_i;
4838 	unsigned int bitmap_size, sec_bitmap_size;
4839 
4840 	/* allocate memory for free segmap information */
4841 	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4842 	if (!free_i)
4843 		return -ENOMEM;
4844 
4845 	SM_I(sbi)->free_info = free_i;
4846 
4847 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4848 	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4849 	if (!free_i->free_segmap)
4850 		return -ENOMEM;
4851 
4852 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4853 	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4854 	if (!free_i->free_secmap)
4855 		return -ENOMEM;
4856 
4857 	/* set all segments as dirty temporarily */
4858 	memset(free_i->free_segmap, 0xff, bitmap_size);
4859 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4860 
4861 	/* init free segmap information */
4862 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4863 	free_i->free_segments = 0;
4864 	free_i->free_sections = 0;
4865 	spin_lock_init(&free_i->segmap_lock);
4866 	return 0;
4867 }
4868 
4869 static int build_curseg(struct f2fs_sb_info *sbi)
4870 {
4871 	struct curseg_info *array;
4872 	int i;
4873 
4874 	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4875 					sizeof(*array)), GFP_KERNEL);
4876 	if (!array)
4877 		return -ENOMEM;
4878 
4879 	SM_I(sbi)->curseg_array = array;
4880 
4881 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4882 		mutex_init(&array[i].curseg_mutex);
4883 		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4884 		if (!array[i].sum_blk)
4885 			return -ENOMEM;
4886 		init_rwsem(&array[i].journal_rwsem);
4887 		array[i].journal = f2fs_kzalloc(sbi,
4888 				sizeof(struct f2fs_journal), GFP_KERNEL);
4889 		if (!array[i].journal)
4890 			return -ENOMEM;
4891 		array[i].seg_type = log_type_to_seg_type(i);
4892 		reset_curseg_fields(&array[i]);
4893 	}
4894 	return restore_curseg_summaries(sbi);
4895 }
4896 
4897 static int build_sit_entries(struct f2fs_sb_info *sbi)
4898 {
4899 	struct sit_info *sit_i = SIT_I(sbi);
4900 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4901 	struct f2fs_journal *journal = curseg->journal;
4902 	struct seg_entry *se;
4903 	struct f2fs_sit_entry sit;
4904 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4905 	unsigned int i, start, end;
4906 	unsigned int readed, start_blk = 0;
4907 	int err = 0;
4908 	block_t sit_valid_blocks[2] = {0, 0};
4909 
4910 	do {
4911 		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4912 							META_SIT, true);
4913 
4914 		start = start_blk * sit_i->sents_per_block;
4915 		end = (start_blk + readed) * sit_i->sents_per_block;
4916 
4917 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
4918 			struct f2fs_sit_block *sit_blk;
4919 			struct page *page;
4920 
4921 			se = &sit_i->sentries[start];
4922 			page = get_current_sit_page(sbi, start);
4923 			if (IS_ERR(page))
4924 				return PTR_ERR(page);
4925 			sit_blk = (struct f2fs_sit_block *)page_address(page);
4926 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4927 			f2fs_put_page(page, 1);
4928 
4929 			err = check_block_count(sbi, start, &sit);
4930 			if (err)
4931 				return err;
4932 			seg_info_from_raw_sit(se, &sit);
4933 
4934 			if (se->type >= NR_PERSISTENT_LOG) {
4935 				f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4936 							se->type, start);
4937 				f2fs_handle_error(sbi,
4938 						ERROR_INCONSISTENT_SUM_TYPE);
4939 				return -EFSCORRUPTED;
4940 			}
4941 
4942 			sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4943 
4944 			if (!f2fs_block_unit_discard(sbi))
4945 				goto init_discard_map_done;
4946 
4947 			/* build discard map only one time */
4948 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4949 				memset(se->discard_map, 0xff,
4950 						SIT_VBLOCK_MAP_SIZE);
4951 				goto init_discard_map_done;
4952 			}
4953 			memcpy(se->discard_map, se->cur_valid_map,
4954 						SIT_VBLOCK_MAP_SIZE);
4955 			sbi->discard_blks += BLKS_PER_SEG(sbi) -
4956 						se->valid_blocks;
4957 init_discard_map_done:
4958 			if (__is_large_section(sbi))
4959 				get_sec_entry(sbi, start)->valid_blocks +=
4960 							se->valid_blocks;
4961 		}
4962 		start_blk += readed;
4963 	} while (start_blk < sit_blk_cnt);
4964 
4965 	down_read(&curseg->journal_rwsem);
4966 	for (i = 0; i < sits_in_cursum(journal); i++) {
4967 		unsigned int old_valid_blocks;
4968 
4969 		start = le32_to_cpu(segno_in_journal(journal, i));
4970 		if (start >= MAIN_SEGS(sbi)) {
4971 			f2fs_err(sbi, "Wrong journal entry on segno %u",
4972 				 start);
4973 			err = -EFSCORRUPTED;
4974 			f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
4975 			break;
4976 		}
4977 
4978 		se = &sit_i->sentries[start];
4979 		sit = sit_in_journal(journal, i);
4980 
4981 		old_valid_blocks = se->valid_blocks;
4982 
4983 		sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
4984 
4985 		err = check_block_count(sbi, start, &sit);
4986 		if (err)
4987 			break;
4988 		seg_info_from_raw_sit(se, &sit);
4989 
4990 		if (se->type >= NR_PERSISTENT_LOG) {
4991 			f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4992 							se->type, start);
4993 			err = -EFSCORRUPTED;
4994 			f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
4995 			break;
4996 		}
4997 
4998 		sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4999 
5000 		if (f2fs_block_unit_discard(sbi)) {
5001 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
5002 				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
5003 			} else {
5004 				memcpy(se->discard_map, se->cur_valid_map,
5005 							SIT_VBLOCK_MAP_SIZE);
5006 				sbi->discard_blks += old_valid_blocks;
5007 				sbi->discard_blks -= se->valid_blocks;
5008 			}
5009 		}
5010 
5011 		if (__is_large_section(sbi)) {
5012 			get_sec_entry(sbi, start)->valid_blocks +=
5013 							se->valid_blocks;
5014 			get_sec_entry(sbi, start)->valid_blocks -=
5015 							old_valid_blocks;
5016 		}
5017 	}
5018 	up_read(&curseg->journal_rwsem);
5019 
5020 	if (err)
5021 		return err;
5022 
5023 	if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
5024 		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
5025 			 sit_valid_blocks[NODE], valid_node_count(sbi));
5026 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
5027 		return -EFSCORRUPTED;
5028 	}
5029 
5030 	if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
5031 				valid_user_blocks(sbi)) {
5032 		f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
5033 			 sit_valid_blocks[DATA], sit_valid_blocks[NODE],
5034 			 valid_user_blocks(sbi));
5035 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
5036 		return -EFSCORRUPTED;
5037 	}
5038 
5039 	return 0;
5040 }
5041 
5042 static void init_free_segmap(struct f2fs_sb_info *sbi)
5043 {
5044 	unsigned int start;
5045 	int type;
5046 	struct seg_entry *sentry;
5047 
5048 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
5049 		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
5050 			continue;
5051 		sentry = get_seg_entry(sbi, start);
5052 		if (!sentry->valid_blocks)
5053 			__set_free(sbi, start);
5054 		else
5055 			SIT_I(sbi)->written_valid_blocks +=
5056 						sentry->valid_blocks;
5057 	}
5058 
5059 	/* mark the current segments as in use */
5060 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
5061 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
5062 
5063 		__set_test_and_inuse(sbi, curseg_t->segno);
5064 	}
5065 }
5066 
5067 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
5068 {
5069 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5070 	struct free_segmap_info *free_i = FREE_I(sbi);
5071 	unsigned int segno = 0, offset = 0, secno;
5072 	block_t valid_blocks, usable_blks_in_seg;
5073 
5074 	while (1) {
5075 		/* find dirty segment based on free segmap */
5076 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
5077 		if (segno >= MAIN_SEGS(sbi))
5078 			break;
5079 		offset = segno + 1;
5080 		valid_blocks = get_valid_blocks(sbi, segno, false);
5081 		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
5082 		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
5083 			continue;
5084 		if (valid_blocks > usable_blks_in_seg) {
5085 			f2fs_bug_on(sbi, 1);
5086 			continue;
5087 		}
5088 		mutex_lock(&dirty_i->seglist_lock);
5089 		__locate_dirty_segment(sbi, segno, DIRTY);
5090 		mutex_unlock(&dirty_i->seglist_lock);
5091 	}
5092 
5093 	if (!__is_large_section(sbi))
5094 		return;
5095 
5096 	mutex_lock(&dirty_i->seglist_lock);
5097 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
5098 		valid_blocks = get_valid_blocks(sbi, segno, true);
5099 		secno = GET_SEC_FROM_SEG(sbi, segno);
5100 
5101 		if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
5102 			continue;
5103 		if (IS_CURSEC(sbi, secno))
5104 			continue;
5105 		set_bit(secno, dirty_i->dirty_secmap);
5106 	}
5107 	mutex_unlock(&dirty_i->seglist_lock);
5108 }
5109 
5110 static int init_victim_secmap(struct f2fs_sb_info *sbi)
5111 {
5112 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5113 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
5114 
5115 	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
5116 	if (!dirty_i->victim_secmap)
5117 		return -ENOMEM;
5118 
5119 	dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
5120 	if (!dirty_i->pinned_secmap)
5121 		return -ENOMEM;
5122 
5123 	dirty_i->pinned_secmap_cnt = 0;
5124 	dirty_i->enable_pin_section = true;
5125 	return 0;
5126 }
5127 
5128 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
5129 {
5130 	struct dirty_seglist_info *dirty_i;
5131 	unsigned int bitmap_size, i;
5132 
5133 	/* allocate memory for dirty segments list information */
5134 	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
5135 								GFP_KERNEL);
5136 	if (!dirty_i)
5137 		return -ENOMEM;
5138 
5139 	SM_I(sbi)->dirty_info = dirty_i;
5140 	mutex_init(&dirty_i->seglist_lock);
5141 
5142 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
5143 
5144 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
5145 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
5146 								GFP_KERNEL);
5147 		if (!dirty_i->dirty_segmap[i])
5148 			return -ENOMEM;
5149 	}
5150 
5151 	if (__is_large_section(sbi)) {
5152 		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
5153 		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
5154 						bitmap_size, GFP_KERNEL);
5155 		if (!dirty_i->dirty_secmap)
5156 			return -ENOMEM;
5157 	}
5158 
5159 	init_dirty_segmap(sbi);
5160 	return init_victim_secmap(sbi);
5161 }
5162 
5163 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
5164 {
5165 	int i;
5166 
5167 	/*
5168 	 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
5169 	 * In LFS curseg, all blkaddrs after .next_blkoff should be unused.
5170 	 */
5171 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
5172 		struct curseg_info *curseg = CURSEG_I(sbi, i);
5173 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
5174 		unsigned int blkofs = curseg->next_blkoff;
5175 
5176 		if (f2fs_sb_has_readonly(sbi) &&
5177 			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
5178 			continue;
5179 
5180 		sanity_check_seg_type(sbi, curseg->seg_type);
5181 
5182 		if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
5183 			f2fs_err(sbi,
5184 				 "Current segment has invalid alloc_type:%d",
5185 				 curseg->alloc_type);
5186 			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
5187 			return -EFSCORRUPTED;
5188 		}
5189 
5190 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
5191 			goto out;
5192 
5193 		if (curseg->alloc_type == SSR)
5194 			continue;
5195 
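		/*
		 * In LFS the segment is written strictly in order, so every
		 * block after .next_blkoff must still be unused in the
		 * current bitmap.
		 */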
5196 		for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
5197 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
5198 				continue;
5199 out:
5200 			f2fs_err(sbi,
5201 				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
5202 				 i, curseg->segno, curseg->alloc_type,
5203 				 curseg->next_blkoff, blkofs);
5204 			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
5205 			return -EFSCORRUPTED;
5206 		}
5207 	}
5208 	return 0;
5209 }
5210 
5211 #ifdef CONFIG_BLK_DEV_ZONED
5212 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
5213 				    struct f2fs_dev_info *fdev,
5214 				    struct blk_zone *zone)
5215 {
5216 	unsigned int zone_segno;
5217 	block_t zone_block, valid_block_cnt;
5218 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5219 	int ret;
5220 	unsigned int nofs_flags;
5221 
5222 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5223 		return 0;
5224 
5225 	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
5226 	zone_segno = GET_SEGNO(sbi, zone_block);
5227 
5228 	/*
5229 	 * Skip zones beyond the main area. Zones that cursegs point to
5230 	 * are checked by fix_curseg_write_pointer().
5231 	 */
5232 	if (zone_segno >= MAIN_SEGS(sbi))
5233 		return 0;
5234 
5235 	/*
5236 	 * Get the number of valid blocks in the zone.
5237 	 */
5238 	valid_block_cnt = get_valid_blocks(sbi, zone_segno, true);
5239 	if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
5240 		f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]",
5241 				zone_segno, valid_block_cnt,
5242 				blk_zone_cond_str(zone->cond));
5243 		return 0;
5244 	}
5245 
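	/*
	 * The zone agrees with SIT when it is EMPTY with no valid blocks,
	 * or FULL while holding valid blocks; any other combination is an
	 * inconsistency that is repaired below.
	 */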
5246 	if ((!valid_block_cnt && zone->cond == BLK_ZONE_COND_EMPTY) ||
5247 	    (valid_block_cnt && zone->cond == BLK_ZONE_COND_FULL))
5248 		return 0;
5249 
5250 	if (!valid_block_cnt) {
5251 		f2fs_notice(sbi, "Zone without valid blocks has a non-zero write "
5252 			    "pointer. Reset the write pointer: cond[%s]",
5253 			    blk_zone_cond_str(zone->cond));
5254 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
5255 					zone->len >> log_sectors_per_block);
5256 		if (ret)
5257 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5258 				 fdev->path, ret);
5259 		return ret;
5260 	}
5261 
5262 	/*
5263 	 * If there are valid blocks and the write pointer doesn't match
5264 	 * them, report the inconsistency and fill the zone up to the end
5265 	 * to close it. The inconsistency does not cause write errors
5266 	 * because the zone will not be selected for writes until it gets
5267 	 * discarded.
5268 	 */
5269 	f2fs_notice(sbi, "Valid blocks are not aligned with write "
5270 		    "pointer: valid block[0x%x,0x%x] cond[%s]",
5271 		    zone_segno, valid_block_cnt, blk_zone_cond_str(zone->cond));
5272 
5273 	nofs_flags = memalloc_nofs_save();
5274 	ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
5275 				zone->start, zone->len);
5276 	memalloc_nofs_restore(nofs_flags);
5277 	if (ret == -EOPNOTSUPP) {
5278 		ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
5279 					zone->len - (zone->wp - zone->start),
5280 					GFP_NOFS, 0);
5281 		if (ret)
5282 			f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
5283 					fdev->path, ret);
5284 	} else if (ret) {
5285 		f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
5286 				fdev->path, ret);
5287 	}
5288 
5289 	return ret;
5290 }
5291 
5292 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
5293 						  block_t zone_blkaddr)
5294 {
5295 	int i;
5296 
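	/*
	 * With a single device, the whole main area resides on it, so the
	 * block-range check can be skipped.
	 */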
5297 	for (i = 0; i < sbi->s_ndevs; i++) {
5298 		if (!bdev_is_zoned(FDEV(i).bdev))
5299 			continue;
5300 		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
5301 				zone_blkaddr <= FDEV(i).end_blk))
5302 			return &FDEV(i);
5303 	}
5304 
5305 	return NULL;
5306 }
5307 
5308 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
5309 			      void *data)
5310 {
5311 	memcpy(data, zone, sizeof(struct blk_zone));
5312 	return 0;
5313 }
5314 
5315 static int do_fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
5316 {
5317 	struct curseg_info *cs = CURSEG_I(sbi, type);
5318 	struct f2fs_dev_info *zbd;
5319 	struct blk_zone zone;
5320 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
5321 	block_t cs_zone_block, wp_block;
5322 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5323 	sector_t zone_sector;
5324 	int err;
5325 
5326 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5327 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5328 
5329 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
5330 	if (!zbd)
5331 		return 0;
5332 
5333 	/* report the zone containing the sector the curseg points to */
5334 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
5335 		<< log_sectors_per_block;
5336 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
5337 				  report_one_zone_cb, &zone);
5338 	if (err != 1) {
5339 		f2fs_err(sbi, "Report zone failed: %s (errno=%d)",
5340 			 zbd->path, err);
5341 		return err;
5342 	}
5343 
5344 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5345 		return 0;
5346 
5347 	/*
5348 	 * If the previous mount was safely unmounted, we can keep using the
5349 	 * current segments. Otherwise, allocate new sections.
5350 	 */
5351 	if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
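		/*
		 * Decompose the device write pointer into a main-area
		 * (segment, block offset) pair. For example, with 4KB
		 * blocks (log_sectors_per_block == 3), a hypothetical
		 * zone.wp of 0x8008 sectors maps to block start_blk +
		 * 0x1001 with a zero sector offset within the block.
		 * The curseg is aligned only if segno and blkoff match
		 * and the sector offset is zero.
		 */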
5352 		wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
5353 		wp_segno = GET_SEGNO(sbi, wp_block);
5354 		wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
5355 		wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
5356 
5357 		if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
5358 				wp_sector_off == 0)
5359 			return 0;
5360 
5361 		f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
5362 			    "curseg[0x%x,0x%x] wp[0x%x,0x%x]", type, cs->segno,
5363 			    cs->next_blkoff, wp_segno, wp_blkoff);
5364 	}
5365 
5366 	/* Allocate a new section if the curseg does not start a fresh section. */
5367 	if (cs->next_blkoff ||
5368 	    cs->segno != GET_SEG_FROM_SEC(sbi, GET_ZONE_FROM_SEC(sbi, cs_section))) {
5369 		unsigned int old_segno = cs->segno, old_blkoff = cs->next_blkoff;
5370 
5371 		f2fs_allocate_new_section(sbi, type, true);
5372 		f2fs_notice(sbi, "Assign new section to curseg[%d]: "
5373 				"[0x%x,0x%x] -> [0x%x,0x%x]",
5374 				type, old_segno, old_blkoff,
5375 				cs->segno, cs->next_blkoff);
5376 	}
5377 
5378 	/* check consistency of the zone the curseg pointed to */
5379 	if (check_zone_write_pointer(sbi, zbd, &zone))
5380 		return -EIO;
5381 
5382 	/* check newly assigned zone */
5383 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5384 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5385 
5386 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
5387 	if (!zbd)
5388 		return 0;
5389 
5390 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
5391 		<< log_sectors_per_block;
5392 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
5393 				  report_one_zone_cb, &zone);
5394 	if (err != 1) {
5395 		f2fs_err(sbi, "Report zone failed: %s (errno=%d)",
5396 			 zbd->path, err);
5397 		return err;
5398 	}
5399 
5400 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5401 		return 0;
5402 
5403 	if (zone.wp != zone.start) {
5404 		f2fs_notice(sbi,
5405 			    "New zone for curseg[%d] is not yet discarded. "
5406 			    "Reset the zone: curseg[0x%x,0x%x]",
5407 			    type, cs->segno, cs->next_blkoff);
5408 		err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
5409 					zone.len >> log_sectors_per_block);
5410 		if (err) {
5411 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5412 				 zbd->path, err);
5413 			return err;
5414 		}
5415 	}
5416 
5417 	return 0;
5418 }
5419 
5420 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5421 {
5422 	int i, ret;
5423 
5424 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
5425 		ret = do_fix_curseg_write_pointer(sbi, i);
5426 		if (ret)
5427 			return ret;
5428 	}
5429 
5430 	return 0;
5431 }
5432 
5433 struct check_zone_write_pointer_args {
5434 	struct f2fs_sb_info *sbi;
5435 	struct f2fs_dev_info *fdev;
5436 };
5437 
5438 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
5439 				      void *data)
5440 {
5441 	struct check_zone_write_pointer_args *args;
5442 
5443 	args = (struct check_zone_write_pointer_args *)data;
5444 
5445 	return check_zone_write_pointer(args->sbi, args->fdev, zone);
5446 }
5447 
5448 static int check_write_pointer(struct f2fs_sb_info *sbi)
5449 {
5450 	int i, ret;
5451 	struct check_zone_write_pointer_args args;
5452 
5453 	for (i = 0; i < sbi->s_ndevs; i++) {
5454 		if (!bdev_is_zoned(FDEV(i).bdev))
5455 			continue;
5456 
5457 		args.sbi = sbi;
5458 		args.fdev = &FDEV(i);
5459 		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
5460 					  check_zone_write_pointer_cb, &args);
5461 		if (ret < 0)
5462 			return ret;
5463 	}
5464 
5465 	return 0;
5466 }
5467 
5468 int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
5469 {
5470 	int ret;
5471 
5472 	if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb) ||
5473 	    f2fs_hw_is_readonly(sbi))
5474 		return 0;
5475 
5476 	f2fs_notice(sbi, "Checking entire write pointers");
5477 	ret = fix_curseg_write_pointer(sbi);
5478 	if (!ret)
5479 		ret = check_write_pointer(sbi);
5480 	return ret;
5481 }
5482 
5483 /*
5484  * Return the number of usable blocks in a segment. For a segment fully
5485  * contained within a sequential zone's capacity, or within a
5486  * conventional zone, this is the full number of blocks in a segment.
5487  * For a segment that straddles the zone capacity, only the blocks up
5488  * to the capacity are usable. A segment that starts at or beyond the
5489  * zone capacity has no usable blocks, so 0 is returned.
5490  */
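/*
 * Example (hypothetical geometry): with 4KB blocks, 2MB segments and a
 * 256MB zone whose capacity is 193MB, segments 0-95 of the section are
 * fully usable, segment 96 has 256 usable blocks (1MB), and segments
 * 97-127 have no usable blocks.
 */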
5491 static inline unsigned int f2fs_usable_zone_blks_in_seg(
5492 			struct f2fs_sb_info *sbi, unsigned int segno)
5493 {
5494 	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
5495 	unsigned int secno;
5496 
5497 	if (!sbi->unusable_blocks_per_sec)
5498 		return BLKS_PER_SEG(sbi);
5499 
5500 	secno = GET_SEC_FROM_SEG(sbi, segno);
5501 	seg_start = START_BLOCK(sbi, segno);
5502 	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5503 	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
5504 
5505 	/*
5506 	 * If the segment starts before the zone capacity and spans beyond
5507 	 * it, the usable blocks run from the segment start to the zone
5508 	 * capacity. If the segment starts at or beyond the zone capacity,
5509 	 * there are no usable blocks.
5510 	 */
5511 	if (seg_start >= sec_cap_blkaddr)
5512 		return 0;
5513 	if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
5514 		return sec_cap_blkaddr - seg_start;
5515 
5516 	return BLKS_PER_SEG(sbi);
5517 }
5518 #else
5519 int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
5520 {
5521 	return 0;
5522 }
5523 
5524 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5525 							unsigned int segno)
5526 {
5527 	return 0;
5528 }
5529 
5530 #endif
5531 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5532 					unsigned int segno)
5533 {
5534 	if (f2fs_sb_has_blkzoned(sbi))
5535 		return f2fs_usable_zone_blks_in_seg(sbi, segno);
5536 
5537 	return BLKS_PER_SEG(sbi);
5538 }
5539 
5540 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi)
5541 {
5542 	if (f2fs_sb_has_blkzoned(sbi))
5543 		return CAP_SEGS_PER_SEC(sbi);
5544 
5545 	return SEGS_PER_SEC(sbi);
5546 }
5547 
5548 unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
5549 	unsigned int segno)
5550 {
5551 	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
5552 	unsigned int secno = 0, start = 0;
5553 	unsigned int total_valid_blocks = 0;
5554 	unsigned long long mtime = 0;
5555 	unsigned int i = 0;
5556 
5557 	secno = GET_SEC_FROM_SEG(sbi, segno);
5558 	start = GET_SEG_FROM_SEC(sbi, secno);
5559 
5560 	if (!__is_large_section(sbi)) {
5561 		mtime = get_seg_entry(sbi, start + i)->mtime;
5562 		goto out;
5563 	}
5564 
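	/*
	 * For a large section, the section mtime is the valid-block-weighted
	 * average over its segments:
	 *   mtime = sum(se->mtime * se->valid_blocks) / sum(se->valid_blocks)
	 */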
5565 	for (i = 0; i < usable_segs_per_sec; i++) {
5566 		/* for a large section, weight each mtime by its valid blocks */
5567 		struct seg_entry *se = get_seg_entry(sbi, start + i);
5568 
5569 		mtime += se->mtime * se->valid_blocks;
5570 		total_valid_blocks += se->valid_blocks;
5571 	}
5572 
5573 	if (total_valid_blocks == 0)
5574 		return INVALID_MTIME;
5575 
5576 	mtime = div_u64(mtime, total_valid_blocks);
5577 out:
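	/* avoid returning the INVALID_MTIME sentinel as a real mtime */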
5578 	if (unlikely(mtime == INVALID_MTIME))
5579 		mtime -= 1;
5580 	return mtime;
5581 }
5582 
5583 /*
5584  * Update the min and max modified times for the cost-benefit GC algorithm.
5585  */
5586 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5587 {
5588 	struct sit_info *sit_i = SIT_I(sbi);
5589 	unsigned int segno;
5590 
5591 	down_write(&sit_i->sentry_lock);
5592 
5593 	sit_i->min_mtime = ULLONG_MAX;
5594 
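	/* scan all sections and track the minimum section mtime */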
5595 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
5596 		unsigned long long mtime = 0;
5597 
5598 		mtime = f2fs_get_section_mtime(sbi, segno);
5599 
5600 		if (sit_i->min_mtime > mtime)
5601 			sit_i->min_mtime = mtime;
5602 	}
5603 	sit_i->max_mtime = get_mtime(sbi, false);
5604 	sit_i->dirty_max_mtime = 0;
5605 	up_write(&sit_i->sentry_lock);
5606 }
5607 
5608 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5609 {
5610 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5611 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5612 	struct f2fs_sm_info *sm_info;
5613 	int err;
5614 
5615 	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5616 	if (!sm_info)
5617 		return -ENOMEM;
5618 
5619 	/* init sm info */
5620 	sbi->sm_info = sm_info;
5621 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5622 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5623 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5624 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5625 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5626 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5627 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
5628 	sm_info->rec_prefree_segments = sm_info->main_segments *
5629 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
5630 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
5631 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
5632 
5633 	if (!f2fs_lfs_mode(sbi))
5634 		sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
5635 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5636 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5637 	sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
5638 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5639 	sm_info->min_ssr_sections = reserved_sections(sbi);
5640 
5641 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
5642 
5643 	init_f2fs_rwsem(&sm_info->curseg_lock);
5644 
5645 	err = f2fs_create_flush_cmd_control(sbi);
5646 	if (err)
5647 		return err;
5648 
5649 	err = create_discard_cmd_control(sbi);
5650 	if (err)
5651 		return err;
5652 
5653 	err = build_sit_info(sbi);
5654 	if (err)
5655 		return err;
5656 	err = build_free_segmap(sbi);
5657 	if (err)
5658 		return err;
5659 	err = build_curseg(sbi);
5660 	if (err)
5661 		return err;
5662 
5663 	/* reinit free segmap based on SIT */
5664 	err = build_sit_entries(sbi);
5665 	if (err)
5666 		return err;
5667 
5668 	init_free_segmap(sbi);
5669 	err = build_dirty_segmap(sbi);
5670 	if (err)
5671 		return err;
5672 
5673 	err = sanity_check_curseg(sbi);
5674 	if (err)
5675 		return err;
5676 
5677 	init_min_max_mtime(sbi);
5678 	return 0;
5679 }
5680 
5681 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5682 		enum dirty_type dirty_type)
5683 {
5684 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5685 
5686 	mutex_lock(&dirty_i->seglist_lock);
5687 	kvfree(dirty_i->dirty_segmap[dirty_type]);
5688 	dirty_i->nr_dirty[dirty_type] = 0;
5689 	mutex_unlock(&dirty_i->seglist_lock);
5690 }
5691 
5692 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5693 {
5694 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5695 
5696 	kvfree(dirty_i->pinned_secmap);
5697 	kvfree(dirty_i->victim_secmap);
5698 }
5699 
5700 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5701 {
5702 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5703 	int i;
5704 
5705 	if (!dirty_i)
5706 		return;
5707 
5708 	/* discard pre-free/dirty segments list */
5709 	for (i = 0; i < NR_DIRTY_TYPE; i++)
5710 		discard_dirty_segmap(sbi, i);
5711 
5712 	if (__is_large_section(sbi)) {
5713 		mutex_lock(&dirty_i->seglist_lock);
5714 		kvfree(dirty_i->dirty_secmap);
5715 		mutex_unlock(&dirty_i->seglist_lock);
5716 	}
5717 
5718 	destroy_victim_secmap(sbi);
5719 	SM_I(sbi)->dirty_info = NULL;
5720 	kfree(dirty_i);
5721 }
5722 
5723 static void destroy_curseg(struct f2fs_sb_info *sbi)
5724 {
5725 	struct curseg_info *array = SM_I(sbi)->curseg_array;
5726 	int i;
5727 
5728 	if (!array)
5729 		return;
5730 	SM_I(sbi)->curseg_array = NULL;
5731 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
5732 		kfree(array[i].sum_blk);
5733 		kfree(array[i].journal);
5734 	}
5735 	kfree(array);
5736 }
5737 
5738 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5739 {
5740 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5741 
5742 	if (!free_i)
5743 		return;
5744 	SM_I(sbi)->free_info = NULL;
5745 	kvfree(free_i->free_segmap);
5746 	kvfree(free_i->free_secmap);
5747 	kfree(free_i);
5748 }
5749 
5750 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5751 {
5752 	struct sit_info *sit_i = SIT_I(sbi);
5753 
5754 	if (!sit_i)
5755 		return;
5756 
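	/* the per-sentry bitmaps were carved out of the single sit_i->bitmap */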
5757 	if (sit_i->sentries)
5758 		kvfree(sit_i->bitmap);
5759 	kfree(sit_i->tmp_map);
5760 
5761 	kvfree(sit_i->sentries);
5762 	kvfree(sit_i->sec_entries);
5763 	kvfree(sit_i->dirty_sentries_bitmap);
5764 
5765 	SM_I(sbi)->sit_info = NULL;
5766 	kvfree(sit_i->sit_bitmap);
5767 #ifdef CONFIG_F2FS_CHECK_FS
5768 	kvfree(sit_i->sit_bitmap_mir);
5769 	kvfree(sit_i->invalid_segmap);
5770 #endif
5771 	kfree(sit_i);
5772 }
5773 
5774 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5775 {
5776 	struct f2fs_sm_info *sm_info = SM_I(sbi);
5777 
5778 	if (!sm_info)
5779 		return;
5780 	f2fs_destroy_flush_cmd_control(sbi, true);
5781 	destroy_discard_cmd_control(sbi);
5782 	destroy_dirty_segmap(sbi);
5783 	destroy_curseg(sbi);
5784 	destroy_free_segmap(sbi);
5785 	destroy_sit_info(sbi);
5786 	sbi->sm_info = NULL;
5787 	kfree(sm_info);
5788 }
5789 
5790 int __init f2fs_create_segment_manager_caches(void)
5791 {
5792 	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5793 			sizeof(struct discard_entry));
5794 	if (!discard_entry_slab)
5795 		goto fail;
5796 
5797 	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5798 			sizeof(struct discard_cmd));
5799 	if (!discard_cmd_slab)
5800 		goto destroy_discard_entry;
5801 
5802 	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5803 			sizeof(struct sit_entry_set));
5804 	if (!sit_entry_set_slab)
5805 		goto destroy_discard_cmd;
5806 
5807 	revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
5808 			sizeof(struct revoke_entry));
5809 	if (!revoke_entry_slab)
5810 		goto destroy_sit_entry_set;
5811 	return 0;
5812 
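/* unwind the partially created caches in reverse order of creation */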
5813 destroy_sit_entry_set:
5814 	kmem_cache_destroy(sit_entry_set_slab);
5815 destroy_discard_cmd:
5816 	kmem_cache_destroy(discard_cmd_slab);
5817 destroy_discard_entry:
5818 	kmem_cache_destroy(discard_entry_slab);
5819 fail:
5820 	return -ENOMEM;
5821 }
5822 
5823 void f2fs_destroy_segment_manager_caches(void)
5824 {
5825 	kmem_cache_destroy(sit_entry_set_slab);
5826 	kmem_cache_destroy(discard_cmd_slab);
5827 	kmem_cache_destroy(discard_entry_slab);
5828 	kmem_cache_destroy(revoke_entry_slab);
5829 }
5830