/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)

#define IS_DATASEG(t)	(t <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	(t >= CURSEG_HOT_NODE)

#define IS_CURSEG(sbi, seg)						\
	((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  sbi->segs_per_sec))

#define MAIN_BLKADDR(sbi)	(SM_I(sbi)->main_blkaddr)
#define SEG0_BLKADDR(sbi)	(SM_I(sbi)->seg0_blkaddr)

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	(sbi->total_sections)

#define TOTAL_SEGS(sbi)	(SM_I(sbi)->segment_count)
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << (sbi->log_blocksize +		\
					sbi->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	(((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?		\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno)						\
	((segno) / sbi->segs_per_sec)
#define GET_ZONENO_FROM_SEGNO(sbi, segno)				\
	((segno / sbi->segs_per_sec) / sbi->secs_per_zone)
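
/*
 * Editor's note: an illustrative example, not part of the original header.
 * With the common 2MB segment (log_blocks_per_seg == 9, i.e. 512 blocks of
 * 4KB each), the translation macros above behave as follows for a block
 * that sits 17 blocks into the fourth segment counted from segment 0:
 *
 *	block_t blk = SEG0_BLKADDR(sbi) + 3 * 512 + 17;
 *
 *	GET_SEGNO_FROM_SEG0(sbi, blk);	// 3
 *	GET_BLKOFF_FROM_SEG0(sbi, blk);	// 17
 *	GET_SEGNO(sbi, blk);		// 3 - FREE_I(sbi)->start_segno
 *
 * i.e. GET_SEGNO() additionally rebases the result into the main area via
 * GET_L2R_SEGNO(), and returns NULL_SEGNO for NULL_ADDR/NEW_ADDR.
 */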

#define GET_SUM_BLOCK(sbi, segno)				\
	((sbi->sm_info->ssa_blkaddr) + segno)

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	(segno % sit_i->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)					\
	(segno / SIT_ENTRY_PER_BLOCK)
#define	START_SEGNO(segno)		\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)			\
	((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)			\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	(sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
#define MAX_BIO_BLOCKS(sbi)						\
	((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))

/*
 * indicates a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means an on-demand cleaning job issued in the background.
 */
enum {
	BG_GC = 0,
	FG_GC,
	FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};
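
/*
 * Editor's note: a minimal sketch, not the in-tree victim selection code,
 * of how the two gc_mode policies typically score a candidate. GC_GREEDY
 * simply prefers the section with the fewest valid blocks, while GC_CB
 * (cost-benefit) also weighs the segment's age so that old, mostly-invalid
 * sections are cleaned first. The helpers used here (get_valid_blocks(),
 * get_seg_entry(), SIT_I()) are defined later in this header:
 *
 *	unsigned int valid = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
 *	unsigned long long mtime = get_seg_entry(sbi, segno)->mtime;
 *	unsigned int u, age;
 *
 *	if (gc_mode == GC_GREEDY)
 *		return valid;	// smaller cost == better victim
 *
 *	// GC_CB: scale utilization and age to 0..100 and combine them;
 *	// the caller keeps the candidate with the smallest cost.
 *	u = (valid * 100) >> sbi->log_blocks_per_seg;
 *	age = 100 - div64_u64(100 * (mtime - SIT_I(sbi)->min_mtime),
 *			SIT_I(sbi)->max_mtime - SIT_I(sbi)->min_mtime + 1);
 *	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
 */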

struct seg_entry {
	unsigned short valid_blocks;	/* # of valid blocks */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned short ckpt_valid_blocks;
	unsigned char *ckpt_valid_map;
	unsigned char *discard_map;
	unsigned char type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

/*
 * this value is set in a page's private data to indicate that the page is
 * atomically written and is on the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)

struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when commit fails */
};
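
/*
 * Editor's note: an illustrative sketch, not the original registration code.
 * A page taking part in an atomic write is tagged through its private data
 * so it can be recognized later; registration in f2fs also links the page
 * into the owning inode's inmem_pages list under a lock:
 *
 *	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
 *	SetPagePrivate(page);
 *	...
 *	if (IS_ATOMIC_WRITTEN_PAGE(page))
 *		...	// part of an uncommitted atomic write
 */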

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct mutex sentry_lock;		/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};
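
/*
 * Editor's note: an illustrative example, not part of the original header.
 * Because the dirty_type order above mirrors CURSEG_HOT_DATA..CURSEG_COLD_NODE
 * in f2fs.h, a segment's on-disk type can be used directly as an index into
 * dirty_segmap[] when tracking per-log dirty segments:
 *
 *	struct seg_entry *sentry = get_seg_entry(sbi, segno);
 *	enum dirty_type t = (enum dirty_type)sentry->type;
 *
 *	if (!test_and_set_bit(segno, DIRTY_I(sbi)->dirty_segmap[t]))
 *		DIRTY_I(sbi)->nr_dirty[t]++;
 */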

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, int section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (section > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}
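
/*
 * Editor's note: an illustrative example, not part of the original header.
 * The raw SIT vblocks field packs the segment type into the bits above
 * SIT_VBLOCKS_SHIFT and the valid block count into the bits below it, so a
 * warm-data segment with 137 valid blocks is stored (before the cpu_to_le16
 * conversion above) as:
 *
 *	raw_vblocks = (CURSEG_WARM_DATA << SIT_VBLOCKS_SHIFT) | 137;
 *
 * and GET_SIT_TYPE()/GET_SIT_VBLOCKS() in f2fs.h recover the two halves by
 * masking against SIT_VBLOCKS_MASK.
 */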

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
}

static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	return free_sections(sbi) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi) + 1);
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi));
}
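
/*
 * Editor's note: an illustrative worked example, not part of the original
 * header. With reserved_sections() == 12, three sections' worth of dirty
 * node pages and one section's worth of dirty dentry pages:
 *
 *	need_SSR()			 -> true once free_sections() <= 18
 *					    (3 + 2 * 1 + 12 + 1)
 *	has_not_enough_free_secs(sbi, 0) -> true once free_sections() <= 17
 *
 * i.e. SSR is enabled one section earlier than the foreground free-space
 * check, so slack-space reuse can relieve pressure before cleaning must
 * block writers.
 */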

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop the out-of-place update policy,
 * and users can control the policy through sysfs entries.
 * There are five policies with triggering conditions as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in the fsync path only for high-performance
 *                  flash storage. IPU will be triggered only if the # of
 *                  dirty pages exceeds min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
};

static inline bool need_inplace_update(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	/* IPU can be done only for the user data */
	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
		return true;

	return false;
}
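
/*
 * Editor's note: an illustrative example, not part of the original header.
 * ipu_policy is a bitmask, so several triggers can be armed at once through
 * the sysfs knob (the exact sysfs path may vary by kernel version):
 *
 *	// trigger IPU when SSR is active or when utilization exceeds
 *	// min_ipu_util:
 *	// echo 6 > /sys/fs/f2fs/<dev>/ipu_policy
 *	//   6 == (1 << F2FS_IPU_SSR) | (1 << F2FS_IPU_UTIL)
 *
 * With all bits clear (the default noted above), need_inplace_update()
 * always returns false, i.e. IPU is disabled.
 */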

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
					|| blk_addr >= MAX_BLKADDR(sbi));
}

/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
#ifdef CONFIG_F2FS_CHECK_FS
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
#endif
	/* check segment usage, and check boundary of a given segment number */
	f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1);
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
}
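
/*
 * Editor's note: an illustrative example, not part of the original header.
 * The SIT area keeps two copies of every SIT block; sit_bitmap records, per
 * block offset, which copy currently holds the valid data. current_sit_addr()
 * resolves a segment to the valid copy, next_sit_addr() returns the mirror
 * address, and set_to_next_sit() flips the bit so the updated block written
 * by the next checkpoint becomes the valid copy:
 *
 *	pgoff_t cur  = current_sit_addr(sbi, segno);	// valid copy
 *	pgoff_t next = next_sit_addr(sbi, cur);		// shadow copy
 *	...				// write the new SIT block to "next"
 *	set_to_next_sit(SIT_I(sbi), segno);
 */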

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
						sit_i->mounted_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	return SECTOR_TO_BLOCK(queue_max_sectors(q));
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages are gathered for directory data,
 * 512 pages (2MB) * 3 for the three types of nodes, and
 * max_bio_blocks pages for meta.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 3 * sbi->blocks_per_seg;
	else if (type == META)
		return MAX_BIO_BLOCKS(sbi);
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;

	if (type == DATA)
		desired = 4096;
	else if (type == NODE)
		desired = 3 * max_hw_blocks(sbi);
	else
		desired = MAX_BIO_BLOCKS(sbi);

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}
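
/*
 * Editor's note: an illustrative example, not part of the original header.
 * In WB_SYNC_NONE writeback, nr_pages_to_write() raises wbc->nr_to_write to
 * a bio-friendly batch and reports how much it added so the caller can give
 * the excess back afterwards:
 *
 *	// DATA writeback asked to flush 64 pages:
 *	//   nr_to_write = 64, desired = 4096
 *	//   wbc->nr_to_write becomes 4096, return value is 4032
 *
 * The caller later subtracts the returned value from wbc->nr_to_write so the
 * global writeback accounting still sees only the originally requested work.
 */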