1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * fs/f2fs/segment.h
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/blkdev.h>
9 #include <linux/backing-dev.h>
10 
11 /* constant macro */
12 #define NULL_SEGNO			((unsigned int)(~0))
13 #define NULL_SECNO			((unsigned int)(~0))
14 
15 #define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
16 #define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */
17 
18 #define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
19 #define F2FS_MIN_META_SEGMENTS	8 /* SB + 2 (CP + SIT + NAT) + SSA */
20 
21 #define INVALID_MTIME ULLONG_MAX /* no valid blocks in a segment/section */
22 
23 /* L: Logical segment # in volume, R: Relative segment # in main area */
24 #define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
25 #define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)
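/*
 * Worked example (hypothetical numbers): if the main area starts at logical
 * segment 512, i.e. free_i->start_segno == 512, then logical segno 700 is
 * relative segno GET_L2R_SEGNO() == 188, and GET_R2L_SEGNO() maps 188 back
 * to 700.
 */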
26 
27 #define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
28 #define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
29 #define SE_PAGETYPE(se)	((IS_NODESEG((se)->type) ? NODE : DATA))
30 
31 static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
32 						unsigned short seg_type)
33 {
34 	f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
35 }
36 
37 #define IS_CURSEG(sbi, seg)						\
38 	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
39 	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
40 	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
41 	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
42 	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
43 	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) ||	\
44 	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) ||	\
45 	 ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))
46 
47 #define IS_CURSEC(sbi, secno)						\
48 	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
49 	  SEGS_PER_SEC(sbi)) ||	\
50 	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
51 	  SEGS_PER_SEC(sbi)) ||	\
52 	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
53 	  SEGS_PER_SEC(sbi)) ||	\
54 	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
55 	  SEGS_PER_SEC(sbi)) ||	\
56 	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
57 	  SEGS_PER_SEC(sbi)) ||	\
58 	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
59 	  SEGS_PER_SEC(sbi)) ||	\
60 	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno /	\
61 	  SEGS_PER_SEC(sbi)) ||	\
62 	 ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno /	\
63 	  SEGS_PER_SEC(sbi)))
64 
65 #define MAIN_BLKADDR(sbi)						\
66 	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : 				\
67 		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
68 #define SEG0_BLKADDR(sbi)						\
69 	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : 				\
70 		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
71 
72 #define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
73 #define MAIN_SECS(sbi)	((sbi)->total_sections)
74 
75 #define TOTAL_SEGS(sbi)							\
76 	(SM_I(sbi) ? SM_I(sbi)->segment_count : 				\
77 		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
78 #define TOTAL_BLKS(sbi)	(SEGS_TO_BLKS(sbi, TOTAL_SEGS(sbi)))
79 
80 #define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
81 #define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
82 					(sbi)->log_blocks_per_seg))
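/*
 * Worked example (assuming the common f2fs layout of 4KB blocks and 512
 * blocks per segment): log_blocksize == 12 and log_blocks_per_seg == 9, so
 * SEGMENT_SIZE(sbi) == 1ULL << 21 == 2MB.
 */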
83 
84 #define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
85 	 (SEGS_TO_BLKS(sbi, GET_R2L_SEGNO(FREE_I(sbi), segno))))
86 
87 #define NEXT_FREE_BLKADDR(sbi, curseg)					\
88 	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)
89 
90 #define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
91 #define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
92 	(BLKS_TO_SEGS(sbi, GET_SEGOFF_FROM_SEG0(sbi, blk_addr)))
93 #define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
94 	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (BLKS_PER_SEG(sbi) - 1))
95 
96 #define GET_SEGNO(sbi, blk_addr)					\
97 	((!__is_valid_data_blkaddr(blk_addr)) ?			\
98 	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
99 		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
100 #define CAP_BLKS_PER_SEC(sbi)					\
101 	(BLKS_PER_SEC(sbi) - (sbi)->unusable_blocks_per_sec)
102 #define CAP_SEGS_PER_SEC(sbi)					\
103 	(SEGS_PER_SEC(sbi) -					\
104 	BLKS_TO_SEGS(sbi, (sbi)->unusable_blocks_per_sec))
105 #define GET_SEC_FROM_SEG(sbi, segno)				\
106 	(((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC(sbi))
107 #define GET_SEG_FROM_SEC(sbi, secno)				\
108 	((secno) * SEGS_PER_SEC(sbi))
109 #define GET_ZONE_FROM_SEC(sbi, secno)				\
110 	(((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
111 #define GET_ZONE_FROM_SEG(sbi, segno)				\
112 	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
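/*
 * Worked example (hypothetical geometry): with SEGS_PER_SEC(sbi) == 4 and
 * secs_per_zone == 2, segment 10 belongs to section 10 / 4 == 2 and zone
 * 2 / 2 == 1, while GET_SEG_FROM_SEC(sbi, 2) == 8 is the first segment of
 * that section.
 */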
113 
114 #define GET_SUM_BLOCK(sbi, segno)				\
115 	((sbi)->sm_info->ssa_blkaddr + (segno))
116 
117 #define GET_SUM_TYPE(footer) ((footer)->entry_type)
118 #define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
119 
120 #define SIT_ENTRY_OFFSET(sit_i, segno)					\
121 	((segno) % (sit_i)->sents_per_block)
122 #define SIT_BLOCK_OFFSET(segno)					\
123 	((segno) / SIT_ENTRY_PER_BLOCK)
124 #define	START_SEGNO(segno)		\
125 	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
126 #define SIT_BLK_CNT(sbi)			\
127 	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
128 #define f2fs_bitmap_size(nr)			\
129 	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
130 
131 #define SECTOR_FROM_BLOCK(blk_addr)					\
132 	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
133 #define SECTOR_TO_BLOCK(sectors)					\
134 	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
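/*
 * Worked example (assuming 4KB blocks and 512-byte sectors, i.e.
 * F2FS_LOG_SECTORS_PER_BLOCK == 3): SECTOR_FROM_BLOCK(100) == 800 and
 * SECTOR_TO_BLOCK(800) == 100.
 */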
135 
136 /*
137  * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
138  * LFS writes data sequentially with cleaning operations.
139  * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
140  * AT_SSR (Age Threshold based Slack Space Recycle) merges fragments into a
141  * fragmented segment which has a similar aging degree.
142  */
143 enum {
144 	LFS = 0,
145 	SSR,
146 	AT_SSR,
147 };
148 
149 /*
150  * In the victim_sel_policy->gc_mode, there are three gc, aka cleaning, modes.
151  * GC_CB is based on cost-benefit algorithm.
152  * GC_GREEDY is based on greedy algorithm.
153  * GC_AT is based on age-threshold algorithm.
154  */
155 enum {
156 	GC_CB = 0,
157 	GC_GREEDY,
158 	GC_AT,
159 	ALLOC_NEXT,
160 	FLUSH_DEVICE,
161 	MAX_GC_POLICY,
162 };
163 
164 /*
165  * BG_GC means the background cleaning job.
166  * FG_GC means the on-demand cleaning job.
167  */
168 enum {
169 	BG_GC = 0,
170 	FG_GC,
171 };
172 
173 /* for a function parameter to select a victim segment */
174 struct victim_sel_policy {
175 	int alloc_mode;			/* LFS or SSR */
176 	int gc_mode;			/* GC_CB or GC_GREEDY */
177 	unsigned long *dirty_bitmap;	/* dirty segment/section bitmap */
178 	unsigned int max_search;	/*
179 					 * maximum # of segments/sections
180 					 * to search
181 					 */
182 	unsigned int offset;		/* last scanned bitmap offset */
183 	unsigned int ofs_unit;		/* bitmap search unit */
184 	unsigned int min_cost;		/* minimum cost */
185 	unsigned long long oldest_age;	/* oldest age of segments having the same min cost */
186 	unsigned int min_segno;		/* segment # having min. cost */
187 	unsigned long long age;		/* mtime of GCed section */
188 	unsigned long long age_threshold;/* age threshold */
189 	bool one_time_gc;		/* one time GC */
190 };
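/*
 * Illustrative sketch, not part of the original header: one plausible way a
 * caller could seed a victim_sel_policy before a greedy foreground scan. The
 * helper name and the chosen values are assumptions for demonstration only.
 */
static inline void example_init_greedy_policy(struct victim_sel_policy *p,
					unsigned long *dirty_bitmap)
{
	p->alloc_mode = LFS;		/* write the victim out sequentially */
	p->gc_mode = GC_GREEDY;		/* prefer the fewest valid blocks */
	p->dirty_bitmap = dirty_bitmap;	/* candidate segments/sections */
	p->offset = 0;			/* scan from the start of the bitmap */
	p->min_cost = ~0U;		/* nothing selected yet */
	p->min_segno = NULL_SEGNO;	/* likewise */
}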
191 
192 struct seg_entry {
193 	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
194 	unsigned int valid_blocks:10;	/* # of valid blocks */
195 	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
196 	unsigned int padding:6;		/* padding */
197 	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
198 #ifdef CONFIG_F2FS_CHECK_FS
199 	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
200 #endif
201 	/*
202 	 * # of valid blocks and the validity bitmap stored in the last
203 	 * checkpoint pack. This information is used by the SSR mode.
204 	 */
205 	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
206 	unsigned char *discard_map;
207 	unsigned long long mtime;	/* modification time of the segment */
208 };
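/*
 * Note: the four bitfields above (6 + 10 + 10 + 6 bits) pack into a single
 * 32-bit word, and a 10-bit valid_blocks counter (0..1023) comfortably covers
 * the 512 blocks of a standard 2MB segment.
 */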
209 
210 struct sec_entry {
211 	unsigned int valid_blocks;	/* # of valid blocks in a section */
212 };
213 
214 #define MAX_SKIP_GC_COUNT			16
215 
216 struct revoke_entry {
217 	struct list_head list;
218 	block_t old_addr;		/* for revoking when fail to commit */
219 	pgoff_t index;
220 };
221 
222 struct sit_info {
223 	block_t sit_base_addr;		/* start block address of SIT area */
224 	block_t sit_blocks;		/* # of blocks used by SIT area */
225 	block_t written_valid_blocks;	/* # of valid blocks in main area */
226 	char *bitmap;			/* all bitmaps pointer */
227 	char *sit_bitmap;		/* SIT bitmap pointer */
228 #ifdef CONFIG_F2FS_CHECK_FS
229 	char *sit_bitmap_mir;		/* SIT bitmap mirror */
230 
231 	/* bitmap of segments to be ignored by GC in case of errors */
232 	unsigned long *invalid_segmap;
233 #endif
234 	unsigned int bitmap_size;	/* SIT bitmap size */
235 
236 	unsigned long *tmp_map;			/* bitmap for temporary use */
237 	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
238 	unsigned int dirty_sentries;		/* # of dirty sentries */
239 	unsigned int sents_per_block;		/* # of SIT entries per block */
240 	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
241 	struct seg_entry *sentries;		/* SIT segment-level cache */
242 	struct sec_entry *sec_entries;		/* SIT section-level cache */
243 
244 	/* for cost-benefit algorithm in cleaning procedure */
245 	unsigned long long elapsed_time;	/* elapsed time after mount */
246 	unsigned long long mounted_time;	/* mount time */
247 	unsigned long long min_mtime;		/* min. modification time */
248 	unsigned long long max_mtime;		/* max. modification time */
249 	unsigned long long dirty_min_mtime;	/* rerange candidates in GC_AT */
250 	unsigned long long dirty_max_mtime;	/* rerange candidates in GC_AT */
251 
252 	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
253 };
254 
255 struct free_segmap_info {
256 	unsigned int start_segno;	/* start segment number logically */
257 	unsigned int free_segments;	/* # of free segments */
258 	unsigned int free_sections;	/* # of free sections */
259 	spinlock_t segmap_lock;		/* free segmap lock */
260 	unsigned long *free_segmap;	/* free segment bitmap */
261 	unsigned long *free_secmap;	/* free section bitmap */
262 };
263 
264 /* Notice: The order of dirty type is same with CURSEG_XXX in f2fs.h */
265 enum dirty_type {
266 	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
267 	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
268 	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
269 	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
270 	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
271 	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
272 	DIRTY,			/* to count # of dirty segments */
273 	PRE,			/* to count # of entirely obsolete segments */
274 	NR_DIRTY_TYPE
275 };
276 
277 struct dirty_seglist_info {
278 	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
279 	unsigned long *dirty_secmap;
280 	struct mutex seglist_lock;		/* lock for segment bitmaps */
281 	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
282 	unsigned long *victim_secmap;		/* background GC victims */
283 	unsigned long *pinned_secmap;		/* pinned victims from foreground GC */
284 	unsigned int pinned_secmap_cnt;		/* count of victims which has pinned data */
285 	bool enable_pin_section;		/* enable pinning section */
286 };
287 
288 /* for active log information */
289 struct curseg_info {
290 	struct mutex curseg_mutex;		/* lock for consistency */
291 	struct f2fs_summary_block *sum_blk;	/* cached summary block */
292 	struct rw_semaphore journal_rwsem;	/* protect journal area */
293 	struct f2fs_journal *journal;		/* cached journal info */
294 	unsigned char alloc_type;		/* current allocation type */
295 	unsigned short seg_type;		/* segment type like CURSEG_XXX_TYPE */
296 	unsigned int segno;			/* current segment number */
297 	unsigned short next_blkoff;		/* next block offset to write */
298 	unsigned int zone;			/* current zone number */
299 	unsigned int next_segno;		/* preallocated segment */
300 	int fragment_remained_chunk;		/* remained block size in a chunk for block fragmentation mode */
301 	bool inited;				/* indicate inmem log is inited */
302 };
303 
304 struct sit_entry_set {
305 	struct list_head set_list;	/* link with all sit sets */
306 	unsigned int start_segno;	/* start segno of sits in set */
307 	unsigned int entry_cnt;		/* the # of sit entries in set */
308 };
309 
310 /*
311  * inline functions
312  */
313 static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
314 {
315 	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
316 }
317 
318 static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
319 						unsigned int segno)
320 {
321 	struct sit_info *sit_i = SIT_I(sbi);
322 	return &sit_i->sentries[segno];
323 }
324 
325 static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
326 						unsigned int segno)
327 {
328 	struct sit_info *sit_i = SIT_I(sbi);
329 	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
330 }
331 
332 static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
333 				unsigned int segno, bool use_section)
334 {
335 	/*
336 	 * In order to get # of valid blocks in a section instantly from many
337 	 * segments, f2fs manages two counting structures separately.
338 	 */
339 	if (use_section && __is_large_section(sbi))
340 		return get_sec_entry(sbi, segno)->valid_blocks;
341 	else
342 		return get_seg_entry(sbi, segno)->valid_blocks;
343 }
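/*
 * Usage sketch (illustrative only): on a large-section layout a caller that
 * wants the per-section count passes use_section == true, e.g.
 *
 *	if (!get_valid_blocks(sbi, segno, true))
 *		;	// the whole section holds no valid blocks
 *
 * while use_section == false always returns the per-segment count.
 */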
344 
345 static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
346 				unsigned int segno, bool use_section)
347 {
348 	if (use_section && __is_large_section(sbi)) {
349 		unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
350 		unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
351 		unsigned int blocks = 0;
352 		int i;
353 
354 		for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
355 			struct seg_entry *se = get_seg_entry(sbi, start_segno);
356 
357 			blocks += se->ckpt_valid_blocks;
358 		}
359 		return blocks;
360 	}
361 	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
362 }
363 
364 static inline void seg_info_from_raw_sit(struct seg_entry *se,
365 					struct f2fs_sit_entry *rs)
366 {
367 	se->valid_blocks = GET_SIT_VBLOCKS(rs);
368 	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
369 	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
370 	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
371 #ifdef CONFIG_F2FS_CHECK_FS
372 	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
373 #endif
374 	se->type = GET_SIT_TYPE(rs);
375 	se->mtime = le64_to_cpu(rs->mtime);
376 }
377 
378 static inline void __seg_info_to_raw_sit(struct seg_entry *se,
379 					struct f2fs_sit_entry *rs)
380 {
381 	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
382 					se->valid_blocks;
383 	rs->vblocks = cpu_to_le16(raw_vblocks);
384 	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
385 	rs->mtime = cpu_to_le64(se->mtime);
386 }
387 
388 static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
389 				struct page *page, unsigned int start)
390 {
391 	struct f2fs_sit_block *raw_sit;
392 	struct seg_entry *se;
393 	struct f2fs_sit_entry *rs;
394 	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
395 					(unsigned long)MAIN_SEGS(sbi));
396 	int i;
397 
398 	raw_sit = (struct f2fs_sit_block *)page_address(page);
399 	memset(raw_sit, 0, PAGE_SIZE);
400 	for (i = 0; i < end - start; i++) {
401 		rs = &raw_sit->entries[i];
402 		se = get_seg_entry(sbi, start + i);
403 		__seg_info_to_raw_sit(se, rs);
404 	}
405 }
406 
407 static inline void seg_info_to_raw_sit(struct seg_entry *se,
408 					struct f2fs_sit_entry *rs)
409 {
410 	__seg_info_to_raw_sit(se, rs);
411 
412 	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
413 	se->ckpt_valid_blocks = se->valid_blocks;
414 }
415 
416 static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
417 		unsigned int max, unsigned int segno)
418 {
419 	unsigned int ret;
420 	spin_lock(&free_i->segmap_lock);
421 	ret = find_next_bit(free_i->free_segmap, max, segno);
422 	spin_unlock(&free_i->segmap_lock);
423 	return ret;
424 }
425 
426 static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
427 {
428 	struct free_segmap_info *free_i = FREE_I(sbi);
429 	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
430 	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
431 	unsigned int next;
432 	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
433 
434 	spin_lock(&free_i->segmap_lock);
435 	clear_bit(segno, free_i->free_segmap);
436 	free_i->free_segments++;
437 
438 	next = find_next_bit(free_i->free_segmap,
439 			start_segno + SEGS_PER_SEC(sbi), start_segno);
440 	if (next >= start_segno + usable_segs) {
441 		clear_bit(secno, free_i->free_secmap);
442 		free_i->free_sections++;
443 	}
444 	spin_unlock(&free_i->segmap_lock);
445 }
446 
447 static inline void __set_inuse(struct f2fs_sb_info *sbi,
448 		unsigned int segno)
449 {
450 	struct free_segmap_info *free_i = FREE_I(sbi);
451 	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
452 
453 	set_bit(segno, free_i->free_segmap);
454 	free_i->free_segments--;
455 	if (!test_and_set_bit(secno, free_i->free_secmap))
456 		free_i->free_sections--;
457 }
458 
459 static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
460 		unsigned int segno, bool inmem)
461 {
462 	struct free_segmap_info *free_i = FREE_I(sbi);
463 	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
464 	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
465 	unsigned int next;
466 	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
467 
468 	spin_lock(&free_i->segmap_lock);
469 	if (test_and_clear_bit(segno, free_i->free_segmap)) {
470 		free_i->free_segments++;
471 
472 		if (!inmem && IS_CURSEC(sbi, secno))
473 			goto skip_free;
474 		next = find_next_bit(free_i->free_segmap,
475 				start_segno + SEGS_PER_SEC(sbi), start_segno);
476 		if (next >= start_segno + usable_segs) {
477 			if (test_and_clear_bit(secno, free_i->free_secmap))
478 				free_i->free_sections++;
479 		}
480 	}
481 skip_free:
482 	spin_unlock(&free_i->segmap_lock);
483 }
484 
485 static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
486 		unsigned int segno)
487 {
488 	struct free_segmap_info *free_i = FREE_I(sbi);
489 	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
490 
491 	spin_lock(&free_i->segmap_lock);
492 	if (!test_and_set_bit(segno, free_i->free_segmap)) {
493 		free_i->free_segments--;
494 		if (!test_and_set_bit(secno, free_i->free_secmap))
495 			free_i->free_sections--;
496 	}
497 	spin_unlock(&free_i->segmap_lock);
498 }
499 
500 static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
501 		void *dst_addr)
502 {
503 	struct sit_info *sit_i = SIT_I(sbi);
504 
505 #ifdef CONFIG_F2FS_CHECK_FS
506 	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
507 						sit_i->bitmap_size))
508 		f2fs_bug_on(sbi, 1);
509 #endif
510 	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
511 }
512 
513 static inline block_t written_block_count(struct f2fs_sb_info *sbi)
514 {
515 	return SIT_I(sbi)->written_valid_blocks;
516 }
517 
518 static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
519 {
520 	return FREE_I(sbi)->free_segments;
521 }
522 
523 static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
524 {
525 	return SM_I(sbi)->reserved_segments;
526 }
527 
528 static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
529 {
530 	return FREE_I(sbi)->free_sections;
531 }
532 
533 static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
534 {
535 	return DIRTY_I(sbi)->nr_dirty[PRE];
536 }
537 
538 static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
539 {
540 	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
541 		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
542 		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
543 		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
544 		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
545 		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
546 }
547 
548 static inline int overprovision_segments(struct f2fs_sb_info *sbi)
549 {
550 	return SM_I(sbi)->ovp_segments;
551 }
552 
553 static inline int reserved_sections(struct f2fs_sb_info *sbi)
554 {
555 	return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
556 }
557 
558 static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
559 			unsigned int node_blocks, unsigned int data_blocks,
560 			unsigned int dent_blocks)
561 {
562 	unsigned int segno, left_blocks, blocks;
563 	int i;
564 
565 	/* check current data/node sections in the worst case. */
566 	for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) {
567 		segno = CURSEG_I(sbi, i)->segno;
568 
569 		if (unlikely(segno == NULL_SEGNO))
570 			return false;
571 
572 		left_blocks = CAP_BLKS_PER_SEC(sbi) -
573 				get_ckpt_valid_blocks(sbi, segno, true);
574 
575 		blocks = i <= CURSEG_COLD_DATA ? data_blocks : node_blocks;
576 		if (blocks > left_blocks)
577 			return false;
578 	}
579 
580 	/* check current data section for dentry blocks. */
581 	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
582 
583 	if (unlikely(segno == NULL_SEGNO))
584 		return false;
585 
586 	left_blocks = CAP_BLKS_PER_SEC(sbi) -
587 			get_ckpt_valid_blocks(sbi, segno, true);
588 	if (dent_blocks > left_blocks)
589 		return false;
590 	return true;
591 }
592 
593 /*
594  * calculate needed sections for dirty node/dentry and call
595  * has_curseg_enough_space; please note that it needs to account for
596  * dirty data as well in LFS mode when checkpoint is disabled.
597  */
598 static inline void __get_secs_required(struct f2fs_sb_info *sbi,
599 		unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p)
600 {
601 	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
602 					get_pages(sbi, F2FS_DIRTY_DENTS) +
603 					get_pages(sbi, F2FS_DIRTY_IMETA);
604 	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
605 	unsigned int total_data_blocks = 0;
606 	unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi);
607 	unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi);
608 	unsigned int data_secs = 0;
609 	unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi);
610 	unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
611 	unsigned int data_blocks = 0;
612 
613 	if (f2fs_lfs_mode(sbi) &&
614 		unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
615 		total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA);
616 		data_secs = total_data_blocks / CAP_BLKS_PER_SEC(sbi);
617 		data_blocks = total_data_blocks % CAP_BLKS_PER_SEC(sbi);
618 	}
619 
620 	if (lower_p)
621 		*lower_p = node_secs + dent_secs + data_secs;
622 	if (upper_p)
623 		*upper_p = node_secs + dent_secs +
624 			(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0) +
625 			(data_blocks ? 1 : 0);
626 	if (curseg_p)
627 		*curseg_p = has_curseg_enough_space(sbi,
628 				node_blocks, data_blocks, dent_blocks);
629 }
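/*
 * Worked example (hypothetical numbers): with CAP_BLKS_PER_SEC(sbi) == 512
 * and total_node_blocks == 1300, node_secs == 2 and node_blocks == 276, so
 * the lower bound counts only the two full sections while the upper bound
 * adds one more section for the 276 leftover blocks.
 */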
630 
631 static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
632 					int freed, int needed)
633 {
634 	unsigned int free_secs, lower_secs, upper_secs;
635 	bool curseg_space;
636 
637 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
638 		return false;
639 
640 	__get_secs_required(sbi, &lower_secs, &upper_secs, &curseg_space);
641 
642 	free_secs = free_sections(sbi) + freed;
643 	lower_secs += needed + reserved_sections(sbi);
644 	upper_secs += needed + reserved_sections(sbi);
645 
646 	if (free_secs > upper_secs)
647 		return false;
648 	if (free_secs <= lower_secs)
649 		return true;
650 	return !curseg_space;
651 }
652 
653 static inline bool has_enough_free_secs(struct f2fs_sb_info *sbi,
654 					int freed, int needed)
655 {
656 	return !has_not_enough_free_secs(sbi, freed, needed);
657 }
658 
659 static inline bool has_enough_free_blks(struct f2fs_sb_info *sbi)
660 {
661 	unsigned int total_free_blocks = 0;
662 	unsigned int avail_user_block_count;
663 
664 	spin_lock(&sbi->stat_lock);
665 
666 	avail_user_block_count = get_available_block_count(sbi, NULL, true);
667 	total_free_blocks = avail_user_block_count - (unsigned int)valid_user_blocks(sbi);
668 
669 	spin_unlock(&sbi->stat_lock);
670 
671 	return total_free_blocks > 0;
672 }
673 
674 static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
675 {
676 	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
677 		return true;
678 	if (likely(has_enough_free_secs(sbi, 0, 0)))
679 		return true;
680 	if (!f2fs_lfs_mode(sbi) &&
681 		likely(has_enough_free_blks(sbi)))
682 		return true;
683 	return false;
684 }
685 
686 static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
687 {
688 	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
689 }
690 
691 static inline int utilization(struct f2fs_sb_info *sbi)
692 {
693 	return div_u64((u64)valid_user_blocks(sbi) * 100,
694 					sbi->user_block_count);
695 }
696 
697 /*
698  * Sometimes it is better for f2fs to drop the out-of-place update policy,
699  * and users can control the policy through sysfs entries.
700  * The policies and their triggering conditions are as follows.
701  * F2FS_IPU_FORCE - all the time,
702  * F2FS_IPU_SSR - if SSR mode is activated,
703  * F2FS_IPU_UTIL - if FS utilization is over threshold,
704  * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
705  *                     threshold,
706  * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
707  *                     storage. IPU will be triggered only if the # of dirty
708  *                     pages is over min_fsync_blocks. (=default option)
709  * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
710  * F2FS_IPU_NOCACHE - disable IPU bio cache.
711  * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
712  *                            FI_OPU_WRITE flag.
713  * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
714  */
715 #define DEF_MIN_IPU_UTIL	70
716 #define DEF_MIN_FSYNC_BLOCKS	8
717 #define DEF_MIN_HOT_BLOCKS	16
718 
719 #define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */
720 
721 #define F2FS_IPU_DISABLE	0
722 
723 /* Modification on enum should be synchronized with ipu_mode_names array */
724 enum {
725 	F2FS_IPU_FORCE,
726 	F2FS_IPU_SSR,
727 	F2FS_IPU_UTIL,
728 	F2FS_IPU_SSR_UTIL,
729 	F2FS_IPU_FSYNC,
730 	F2FS_IPU_ASYNC,
731 	F2FS_IPU_NOCACHE,
732 	F2FS_IPU_HONOR_OPU_WRITE,
733 	F2FS_IPU_MAX,
734 };
735 
736 static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi)
737 {
738 	return SM_I(sbi)->ipu_policy == F2FS_IPU_DISABLE;
739 }
740 
741 #define F2FS_IPU_POLICY(name)					\
742 static inline bool IS_##name(struct f2fs_sb_info *sbi)		\
743 {								\
744 	return SM_I(sbi)->ipu_policy & BIT(name);		\
745 }
746 
747 F2FS_IPU_POLICY(F2FS_IPU_FORCE);
748 F2FS_IPU_POLICY(F2FS_IPU_SSR);
749 F2FS_IPU_POLICY(F2FS_IPU_UTIL);
750 F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL);
751 F2FS_IPU_POLICY(F2FS_IPU_FSYNC);
752 F2FS_IPU_POLICY(F2FS_IPU_ASYNC);
753 F2FS_IPU_POLICY(F2FS_IPU_NOCACHE);
754 F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE);
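/*
 * Illustrative sketch, not part of the original header: ipu_policy is a
 * bitmask, so several policies can be enabled at once and each generated
 * helper above only tests its own bit. The function name below is
 * hypothetical.
 */
static inline bool example_want_ipu_on_fsync(struct f2fs_sb_info *sbi)
{
	/* IS_F2FS_IPU_FSYNC() expands to SM_I(sbi)->ipu_policy & BIT(F2FS_IPU_FSYNC) */
	return !IS_F2FS_IPU_DISABLE(sbi) && IS_F2FS_IPU_FSYNC(sbi);
}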
755 
756 static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
757 		int type)
758 {
759 	struct curseg_info *curseg = CURSEG_I(sbi, type);
760 	return curseg->segno;
761 }
762 
763 static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
764 		int type)
765 {
766 	struct curseg_info *curseg = CURSEG_I(sbi, type);
767 	return curseg->alloc_type;
768 }
769 
770 static inline bool valid_main_segno(struct f2fs_sb_info *sbi,
771 		unsigned int segno)
772 {
773 	return segno <= (MAIN_SEGS(sbi) - 1);
774 }
775 
776 static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
777 {
778 	struct f2fs_sb_info *sbi = fio->sbi;
779 
780 	if (__is_valid_data_blkaddr(fio->old_blkaddr))
781 		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
782 					META_GENERIC : DATA_GENERIC);
783 	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
784 					META_GENERIC : DATA_GENERIC_ENHANCE);
785 }
786 
787 /*
788  * Summary block is always treated as an invalid block
789  */
790 static inline int check_block_count(struct f2fs_sb_info *sbi,
791 		int segno, struct f2fs_sit_entry *raw_sit)
792 {
793 	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
794 	int valid_blocks = 0;
795 	int cur_pos = 0, next_pos;
796 	unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);
797 
798 	/* check bitmap with valid block count */
799 	do {
800 		if (is_valid) {
801 			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
802 					usable_blks_per_seg,
803 					cur_pos);
804 			valid_blocks += next_pos - cur_pos;
805 		} else
806 			next_pos = find_next_bit_le(&raw_sit->valid_map,
807 					usable_blks_per_seg,
808 					cur_pos);
809 		cur_pos = next_pos;
810 		is_valid = !is_valid;
811 	} while (cur_pos < usable_blks_per_seg);
812 
813 	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
814 		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
815 			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
816 		set_sbi_flag(sbi, SBI_NEED_FSCK);
817 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
818 		return -EFSCORRUPTED;
819 	}
820 
821 	if (usable_blks_per_seg < BLKS_PER_SEG(sbi))
822 		f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
823 				BLKS_PER_SEG(sbi),
824 				usable_blks_per_seg) != BLKS_PER_SEG(sbi));
825 
826 	/* check segment usage, and check boundary of a given segment number */
827 	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
828 					|| !valid_main_segno(sbi, segno))) {
829 		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
830 			 GET_SIT_VBLOCKS(raw_sit), segno);
831 		set_sbi_flag(sbi, SBI_NEED_FSCK);
832 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
833 		return -EFSCORRUPTED;
834 	}
835 	return 0;
836 }
837 
838 static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
839 						unsigned int start)
840 {
841 	struct sit_info *sit_i = SIT_I(sbi);
842 	unsigned int offset = SIT_BLOCK_OFFSET(start);
843 	block_t blk_addr = sit_i->sit_base_addr + offset;
844 
845 	f2fs_bug_on(sbi, !valid_main_segno(sbi, start));
846 
847 #ifdef CONFIG_F2FS_CHECK_FS
848 	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
849 			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
850 		f2fs_bug_on(sbi, 1);
851 #endif
852 
853 	/* calculate sit block address */
854 	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
855 		blk_addr += sit_i->sit_blocks;
856 
857 	return blk_addr;
858 }
859 
860 static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
861 						pgoff_t block_addr)
862 {
863 	struct sit_info *sit_i = SIT_I(sbi);
864 	block_addr -= sit_i->sit_base_addr;
865 	if (block_addr < sit_i->sit_blocks)
866 		block_addr += sit_i->sit_blocks;
867 	else
868 		block_addr -= sit_i->sit_blocks;
869 
870 	return block_addr + sit_i->sit_base_addr;
871 }
872 
873 static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
874 {
875 	unsigned int block_off = SIT_BLOCK_OFFSET(start);
876 
877 	f2fs_change_bit(block_off, sit_i->sit_bitmap);
878 #ifdef CONFIG_F2FS_CHECK_FS
879 	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
880 #endif
881 }
882 
883 static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
884 						bool base_time)
885 {
886 	struct sit_info *sit_i = SIT_I(sbi);
887 	time64_t diff, now = ktime_get_boottime_seconds();
888 
889 	if (now >= sit_i->mounted_time)
890 		return sit_i->elapsed_time + now - sit_i->mounted_time;
891 
892 	/* system time is set to the past */
893 	if (!base_time) {
894 		diff = sit_i->mounted_time - now;
895 		if (sit_i->elapsed_time >= diff)
896 			return sit_i->elapsed_time - diff;
897 		return 0;
898 	}
899 	return sit_i->elapsed_time;
900 }
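/*
 * Worked example (hypothetical numbers): with elapsed_time == 1000s,
 * mounted_time == 5000s and now == 5300s, get_mtime() returns 1300s. If the
 * system clock is then set back so that now == 4900s, the !base_time case
 * returns 1000 - 100 == 900s (clamped at 0), while base_time == true simply
 * returns the recorded elapsed_time of 1000s.
 */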
901 
902 static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
903 			unsigned int ofs_in_node, unsigned char version)
904 {
905 	sum->nid = cpu_to_le32(nid);
906 	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
907 	sum->version = version;
908 }
909 
910 static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
911 {
912 	return __start_cp_addr(sbi) +
913 		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
914 }
915 
916 static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
917 {
918 	return __start_cp_addr(sbi) +
919 		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
920 				- (base + 1) + type;
921 }
922 
923 static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
924 {
925 	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
926 		return true;
927 	return false;
928 }
929 
930 /*
931  * It is very important to gather dirty pages and write at once, so that we can
932  * submit a big bio without interfering with other data writes.
933  * By default, 512 pages for directory data,
934  * 512 pages (2MB) * 8 for nodes, and
935  * 256 pages * 8 for meta are set.
936  */
937 static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
938 {
939 	if (sbi->sb->s_bdi->wb.dirty_exceeded)
940 		return 0;
941 
942 	if (type == DATA)
943 		return BLKS_PER_SEG(sbi);
944 	else if (type == NODE)
945 		return SEGS_TO_BLKS(sbi, 8);
946 	else if (type == META)
947 		return 8 * BIO_MAX_VECS;
948 	else
949 		return 0;
950 }
951 
952 /*
953  * When writing pages, it is better to align nr_to_write to the segment size.
954  */
955 static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
956 					struct writeback_control *wbc)
957 {
958 	long nr_to_write, desired;
959 
960 	if (wbc->sync_mode != WB_SYNC_NONE)
961 		return 0;
962 
963 	nr_to_write = wbc->nr_to_write;
964 	desired = BIO_MAX_VECS;
965 	if (type == NODE)
966 		desired <<= 1;
967 
968 	wbc->nr_to_write = desired;
969 	return desired - nr_to_write;
970 }
971 
972 static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
973 {
974 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
975 	bool wakeup = false;
976 	int i;
977 
978 	if (force)
979 		goto wake_up;
980 
981 	mutex_lock(&dcc->cmd_lock);
982 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
983 		if (i + 1 < dcc->discard_granularity)
984 			break;
985 		if (!list_empty(&dcc->pend_list[i])) {
986 			wakeup = true;
987 			break;
988 		}
989 	}
990 	mutex_unlock(&dcc->cmd_lock);
991 	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
992 		return;
993 wake_up:
994 	dcc->discard_wake = true;
995 	wake_up_interruptible_all(&dcc->discard_wait_queue);
996 }
997