/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */

#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
#define F2FS_MIN_META_SEGMENTS	8 /* SB + 2 (CP + SIT + NAT) + SSA */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)

#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)

static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
						unsigned short seg_type)
{
	f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
}

#define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
#define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
#define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)

#define IS_CURSEG(sbi, seg)						\
	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))

#define IS_CURSEC(sbi, secno)						\
	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno /	\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno /	\
	  (sbi)->segs_per_sec))

#define MAIN_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	((sbi)->total_sections)

#define TOTAL_SEGS(sbi)							\
	(SM_I(sbi) ? SM_I(sbi)->segment_count :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
					(sbi)->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	((!__is_valid_data_blkaddr(blk_addr)) ?				\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi)						\
	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define GET_SEC_FROM_SEG(sbi, segno)					\
	(((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno)					\
	((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno)					\
	(((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno)					\
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))

#define GET_SUM_BLOCK(sbi, segno)					\
	((sbi)->sm_info->ssa_blkaddr + (segno))

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)						\
	((segno) / SIT_ENTRY_PER_BLOCK)
#define	START_SEGNO(segno)						\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)						\
	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)						\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
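
/*
 * Illustrative sketch (compiled out, not part of f2fs): how the address
 * macros above compose. For a valid main-area block address, GET_SEGNO()
 * and GET_BLKOFF_FROM_SEG0() invert START_BLOCK(); with the default
 * geometry of 4KB blocks and 2MB segments, blocks_per_seg is 512 and
 * log_blocks_per_seg is 9.
 */
#if 0
static inline void example_segno_round_trip(struct f2fs_sb_info *sbi,
						block_t blk_addr)
{
	unsigned int segno = GET_SEGNO(sbi, blk_addr);
	unsigned int blkoff = GET_BLKOFF_FROM_SEG0(sbi, blk_addr);

	/* for a valid main-area address, the macros invert each other */
	if (segno != NULL_SEGNO)
		f2fs_bug_on(sbi, START_BLOCK(sbi, segno) + blkoff != blk_addr);
}
#endif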

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 * AT_SSR (Age-Threshold-based Slack Space Recycle) merges fragments into a
 * fragmented segment which has a similar aging degree.
 */
enum {
	LFS = 0,
	SSR,
	AT_SSR,
};

/*
 * In the victim_sel_policy->gc_mode, there are three GC, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 * GC_AT is based on the age-threshold algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY,
	GC_AT,
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum {
	BG_GC = 0,
	FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_bitmap;	/* dirty segment/section bitmap */
	unsigned int max_search;	/*
					 * maximum # of segments/sections
					 * to search
					 */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned long long oldest_age;	/* oldest age of segments having the same min cost */
	unsigned int min_segno;		/* segment # having min. cost */
	unsigned long long age;		/* mtime of GCed section */
	unsigned long long age_threshold; /* age threshold */
};
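
/*
 * Minimal sketch (compiled out, hypothetical helper) of how a caller might
 * fill this structure for a greedy, segment-granularity victim search; the
 * in-tree initialization is done by select_policy() in gc.c and differs in
 * detail (e.g. min_cost comes from get_max_cost()).
 */
#if 0
static inline void example_init_policy(struct f2fs_sb_info *sbi,
					struct victim_sel_policy *p)
{
	p->alloc_mode = LFS;
	p->gc_mode = GC_GREEDY;
	p->dirty_bitmap = DIRTY_I(sbi)->dirty_segmap[DIRTY];
	p->max_search = MAIN_SEGS(sbi);
	p->offset = 0;
	p->ofs_unit = 1;		/* search segment by segment */
	p->min_cost = UINT_MAX;		/* nothing found yet */
	p->min_segno = NULL_SEGNO;
}
#endif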

struct seg_entry {
	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10;	/* # of valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
	unsigned int padding:6;		/* padding */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
#endif
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime;	/* modification time of the segment */
};
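
/*
 * Note on the bit-fields above: type(6) + valid_blocks(10) +
 * ckpt_valid_blocks(10) + padding(6) pack into one 32-bit word. Ten bits
 * allow counts up to 1023, which covers the default 512 blocks (2MB / 4KB)
 * per segment.
 */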

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

#define MAX_SKIP_GC_COUNT			16

struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when fail to commit */
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *bitmap;			/* all bitmaps pointer */
	char *sit_bitmap;		/* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir;		/* SIT bitmap mirror */

	/* bitmap of segments to be ignored by GC in case of errors */
	unsigned long *invalid_segmap;
#endif
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
	unsigned long long dirty_min_mtime;	/* min. mtime of GC_AT candidates */
	unsigned long long dirty_max_mtime;	/* max. mtime of GC_AT candidates */

	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is same with CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	unsigned long *dirty_secmap;
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
					int, int, char, unsigned long long);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned short seg_type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
	int fragment_remained_chunk;		/* remaining blocks in a chunk for block fragmentation mode */
	bool inited;				/* indicate inmem log is inited */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (use_section && __is_large_section(sbi))
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	if (use_section && __is_large_section(sbi)) {
		unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
		unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
		unsigned int blocks = 0;
		int i;

		/* walk every segment of the section containing @segno */
		for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
			struct seg_entry *se = get_seg_entry(sbi, start_segno);

			blocks += se->ckpt_valid_blocks;
		}
		return blocks;
	}
	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void __seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	rs->mtime = cpu_to_le64(se->mtime);
}
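
/*
 * Worked example of the on-disk packing used above (SIT_VBLOCKS_SHIFT is
 * 10 in f2fs.h): a warm data segment (type 1) holding 300 valid blocks is
 * stored as vblocks = (1 << 10) | 300 = 0x052c, and GET_SIT_TYPE() /
 * GET_SIT_VBLOCKS() recover 1 and 300 from the high and low bits.
 */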

static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
				struct page *page, unsigned int start)
{
	struct f2fs_sit_block *raw_sit;
	struct seg_entry *se;
	struct f2fs_sit_entry *rs;
	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
					(unsigned long)MAIN_SEGS(sbi));
	int i;

	raw_sit = (struct f2fs_sit_block *)page_address(page);
	memset(raw_sit, 0, PAGE_SIZE);
	for (i = 0; i < end - start; i++) {
		rs = &raw_sit->entries[i];
		se = get_seg_entry(sbi, start + i);
		__seg_info_to_raw_sit(se, rs);
	}
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	__seg_info_to_raw_sit(se, rs);

	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;
	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + usable_segs) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno, bool inmem)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;
	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		if (!inmem && IS_CURSEC(sbi, secno))
			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + usable_segs) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
skip_free:
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}
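
/*
 * In free_segmap/free_secmap a set bit means "in use"; a section is freed
 * only once every usable segment inside it is clear again. Sketch
 * (compiled out, hypothetical helper) of a query built on these bitmaps:
 */
#if 0
static inline bool example_segment_is_free(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	bool free;

	spin_lock(&free_i->segmap_lock);
	free = !test_bit(segno, free_i->free_segmap);
	spin_unlock(&free_i->segmap_lock);
	return free;
}
#endif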

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
						sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments +
			SM_I(sbi)->additional_reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
}

static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
{
	unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int segno, left_blocks;
	int i;

	/* check current node segment */
	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
		segno = CURSEG_I(sbi, i)->segno;
		left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
				get_seg_entry(sbi, segno)->ckpt_valid_blocks;

		if (node_blocks > left_blocks)
			return false;
	}

	/* check current data segment */
	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
	left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
	if (dent_blocks > left_blocks)
		return false;
	return true;
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
			has_curseg_enough_space(sbi))
		return false;
	return (free_sections(sbi) + freed) <=
		(node_secs + 2 * dent_secs + imeta_secs +
		reserved_sections(sbi) + needed);
}

static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{
	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;
	if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
		return true;
	return false;
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}
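
/*
 * Worked example for utilization(): with 4KB blocks, 1,572,864 valid user
 * blocks (6GB) out of 2,097,152 user blocks (8GB) reports 75, so the
 * F2FS_IPU_UTIL policy below with min_ipu_util == 70 would permit
 * in-place updates.
 */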

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy
 * and update data in place (IPU). Users can control the policy through
 * sysfs entries. The policies and their triggering conditions are as
 * follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                  storages. IPU will be triggered only if the # of dirty
 *                  pages exceeds min_fsync_blocks. (=default option)
 * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
 * F2FS_IPU_NOCACHE - disable IPU bio cache.
 * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
 *                            FI_OPU_WRITE flag.
 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8
#define DEF_MIN_HOT_BLOCKS	16

#define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
	F2FS_IPU_NOCACHE,
	F2FS_IPU_HONOR_OPU_WRITE,
};
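
/*
 * Sketch (compiled out, hypothetical helper): the values above are bit
 * positions in SM_I(sbi)->ipu_policy, so a policy check looks roughly like
 * the following; the real decision logic lives in
 * f2fs_should_update_inplace().
 */
#if 0
static inline bool example_ipu_util_allowed(struct f2fs_sb_info *sbi)
{
	unsigned int policy = SM_I(sbi)->ipu_policy;

	return (policy & (1 << F2FS_IPU_UTIL)) &&
		utilization(sbi) > SM_I(sbi)->min_ipu_util;
}
#endif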

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC);
	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC_ENHANCE);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;
	unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < usable_blks_per_seg);

	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}

	if (usable_blks_per_seg < sbi->blocks_per_seg)
		f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
				sbi->blocks_per_seg,
				usable_blks_per_seg) != sbi->blocks_per_seg);

	/* check segment usage, and check boundary of a given segment number */
	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1)) {
		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
			 GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}
	return 0;
}
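
/*
 * Worked example for the loop above: if the first bits of valid_map are
 * 1 1 1 0 0 1 1 0 (and the rest are zero), the walk alternates between
 * find_next_zero_bit_le() and find_next_bit_le(), accumulating the runs of
 * set bits (3 + 2 = 5), which must match GET_SIT_VBLOCKS().
 */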

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

#ifdef CONFIG_F2FS_CHECK_FS
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
						bool base_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	time64_t diff, now = ktime_get_boottime_seconds();

	if (now >= sit_i->mounted_time)
		return sit_i->elapsed_time + now - sit_i->mounted_time;

	/* system time is set to the past */
	if (!base_time) {
		diff = sit_i->mounted_time - now;
		if (sit_i->elapsed_time >= diff)
			return sit_i->elapsed_time - diff;
		return 0;
	}
	return sit_i->elapsed_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 8 * sbi->blocks_per_seg;
	else if (type == META)
		return 8 * BIO_MAX_VECS;
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;
	desired = BIO_MAX_VECS;
	if (type == NODE)
		desired <<= 1;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}
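
/*
 * Worked example for nr_pages_to_write(): for background NODE writeback
 * entering with wbc->nr_to_write == 1024, desired becomes
 * 2 * BIO_MAX_VECS == 512, wbc->nr_to_write is clamped to 512, and the
 * function returns -512 so the caller can add the difference back and keep
 * the overall writeback accounting unchanged.
 */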

static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	bool wakeup = false;
	int i;

	if (force)
		goto wake_up;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dcc->discard_granularity)
			break;
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
		return;
wake_up:
	dcc->discard_wake = 1;
	wake_up_interruptible_all(&dcc->discard_wait_queue);
}