// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);
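/*
 * Background GC thread. It sleeps for an adaptive interval between
 * min_sleep_time and max_sleep_time, then tries one round of garbage
 * collection when the filesystem is writable and the I/O path is idle.
 * The interval shrinks while GC needs to be boosted (e.g. the device is
 * running low on free space) and grows again once there is little left
 * to clean. With the GC_MERGE mount option, waiters on fggc_wq can also
 * ask this thread to run foreground GC on their behalf.
 */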
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_freezable_timeout(*wq,
				kthread_should_stop() ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		gc_control.one_time = false;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note) We have to avoid triggering GC frequently, because
		 * some segments may be invalidated soon afterwards by user
		 * updates or deletions. So we'd like to wait some time to
		 * collect more dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (f2fs_sb_has_blkzoned(sbi)) {
			if (has_enough_free_blocks(sbi,
					gc_th->no_zoned_gc_percent)) {
				wait_ms = gc_th->no_gc_sleep_time;
				f2fs_up_write(&sbi->gc_lock);
				goto next;
			}
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->max_sleep_time;
		}

		if (need_to_boost_gc(sbi)) {
			decrease_sleep_time(gc_th, &wait_ms);
			if (f2fs_sb_has_blkzoned(sbi))
				gc_control.one_time = true;
		} else {
			increase_sleep_time(gc_th, &wait_ms);
		}
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
				gc_control.one_time;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't bother wait_ms by foreground gc */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;

	if (f2fs_sb_has_blkzoned(sbi)) {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
		gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
		gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
	} else {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
		gc_th->no_zoned_gc_percent = 0;
		gc_th->boost_zoned_gc_percent = 0;
	}

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
	case GC_URGENT_LOW:
	case GC_URGENT_MID:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust the candidate range; all dirty segments should be
	 * scanned in the foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first. */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC before. Those sections are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}
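/*
 * Cost-benefit victim selection. With utilization u (the percentage of
 * valid blocks in the section) and age (the section's modification time
 * normalized to [0, 100] over the observed mtime range, where older
 * sections get a larger value), the benefit of cleaning a section is
 * roughly proportional to age * (100 - u) / (100 + u): free space gained,
 * weighted by how long it is likely to stay free, divided by the cost of
 * migrating the still-valid blocks. Returning UINT_MAX minus this value
 * turns the benefit into a cost, so the caller can keep minimizing.
 */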
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);
	vblocks = get_valid_blocks(sbi, segno, true);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = BLKS_TO_SEGS(sbi, vblocks * 100);

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >=
			CAP_BLKS_PER_SEC(sbi) *
			sbi->gc_thread->valid_thresh_ratio / 100))
		return UINT_MAX;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}
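/*
 * Age-threshold GC (ATGC) cost. Both terms are scaled by "accu" so that
 * they fit the same fixed-point range without overflowing 64 bits: the
 * age term contributes accu * (max_mtime - mtime) / total_time *
 * age_weight, and the free-space term contributes
 * accu * (sec_blocks - vblocks) / sec_blocks * (100 - age_weight).
 * The candidate with the largest weighted sum, i.e. the smallest
 * UINT_MAX - (age + u), wins; ties go to the older section.
 */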
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}
static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
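/*
 * In short, the policy matrix looks roughly like this: LFS allocation
 * uses GC_GREEDY (fewest valid blocks) for foreground GC, and GC_CB or
 * GC_AT (cost-benefit or age-threshold) for background GC, while
 * SSR/AT_SSR pick a partially valid target segment to fill. The chosen
 * gc_mode also decides which last_victim cursor is used to resume the
 * dirty bitmap scan.
 */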
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age, bool one_time)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;
	p.one_time_gc = one_time;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip an invalid segno (one that failed the block validity
		 * check during GC) to avoid an endless GC loop in such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find a source section
				 * during GC. The victim should have no
				 * checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which may be full of
				 * checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If the node is still valid, it is migrated as a
 * cold node block; otherwise (an invalid node) it is skipped.
 */
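/*
 * Node segments are cleaned in three passes over the same summary block:
 * phase 0 readaheads the NAT pages covering the victims' nids, phase 1
 * readaheads the node pages themselves, and phase 2 re-validates each
 * block and actually moves it via f2fs_move_node_page(). Splitting the
 * readahead from the move keeps the read I/O mostly sequential.
 */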
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index that the given node offset indicates.
 * Be careful: the caller must pass a node offset that indicates a direct
 * node block only. Passing an offset that points to another type of node
 * block, such as an indirect or double indirect node block, is a
 * caller's bug.
 */
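/*
 * For example, with the default layout an inode has two direct node
 * blocks at node offsets 1 and 2, then an indirect node at offset 3,
 * and so on, so indirect_blks = 2 * NIDS_PER_BLOCK + 4 bounds the
 * offsets reachable through single-indirect nodes. A direct node at
 * node_ofs == 4 (the first one under the first indirect node) gives
 * dec = 0 and bidx = 2, i.e. its data starts right after the blocks
 * addressed by the inode and the two direct nodes:
 * 2 * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode).
 */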
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}
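/*
 * ra_data_block() above and move_data_block() below implement the GC
 * path for inodes whose data must go through the meta inode mapping
 * (e.g. encrypted files): the on-disk block is read into META_MAPPING
 * as-is, without decryption, so GC can relocate it without the file's
 * key. Plain inodes instead take the move_data_page() path further
 * down, which rewrites the block through the regular data path.
 */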
/*
 * Move a data block via META_MAPPING while keeping the locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);
	if (err) {
		f2fs_put_page(mpage, 1);
		/* the filesystem should shut down; no need to recover the block */
		goto up_out;
	}

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);

	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (folio_test_writeback(page_folio(page))) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}
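/*
 * Data segments go through five passes over the victim's summary block:
 * phase 0 readaheads the NAT entries, phase 1 readaheads the dnode
 * pages, phase 2 readaheads the owning inodes, phase 3 grabs each inode
 * and readaheads its data page (adding the inode to gc_list), and
 * phase 4 finally moves the blocks via move_data_block() or
 * move_data_page() under the inode's i_gc_rwsem.
 */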
/*
 * This function tries to get the parent node of a victim data block, and
 * identifies whether the data block is still valid. If it is, the block
 * is migrated with cold status and the parent node is updated. If the
 * parent node is not valid or the data block address is different, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there are not enough free sections.
		 * Or, stop GC if the segment becomes fully valid due to a
		 * race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			if (is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				iput(inode);
				continue;
			}

			if (f2fs_has_inline_data(inode)) {
				iput(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				f2fs_err_ratelimited(sbi,
					"inode %lx has both inline_data flag and "
					"data block, nid=%u, ofs_in_node=%u",
					inode->i_ino, dni.nid, ofs_in_node);
				continue;
			}

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_meta_inode_gc_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_meta_inode_gc_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_meta_inode_gc_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type, bool one_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
				LFS, 0, one_time);
	up_write(&sit_i->sentry_lock);
	return ret;
}
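/*
 * do_garbage_collect() cleans one section (or, on large-section
 * layouts, a migration window within it). It first references all of
 * the victims' summary pages so they stay cached, then walks each
 * segment and dispatches to gc_node_segment() or gc_data_segment()
 * depending on the summary type recorded in the SSA.
 */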
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate, bool one_time)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
	unsigned int sec_end_segno;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
	int submitted = 0;

	if (__is_large_section(sbi)) {
		sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));

		/*
		 * zone-capacity can be less than zone-size in zoned devices,
		 * resulting in fewer usable segments in the zone than
		 * expected; calculate the end segno in the zone that can be
		 * garbage collected
		 */
		if (f2fs_sb_has_blkzoned(sbi))
			sec_end_segno -= SEGS_PER_SEC(sbi) -
					f2fs_usable_segs_in_sec(sbi);

		if (gc_type == BG_GC || one_time) {
			unsigned int window_granularity =
				sbi->migration_window_granularity;

			if (f2fs_sb_has_blkzoned(sbi) &&
					!has_enough_free_blocks(sbi,
					sbi->gc_thread->boost_zoned_gc_percent))
				window_granularity *=
					BOOST_GC_MULTIPLE;

			end_segno = start_segno + window_granularity;
		}

		if (end_segno > sec_end_segno)
			end_segno = sec_end_segno;
	}

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_gc_seg_count(sbi, data_type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < sec_end_segno) ?
					segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi, data_type);

	blk_finish_plug(&plug);

	if (migrated)
		stat_inc_gc_sec_count(sbi, data_type, gc_type);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;
	unsigned int upper_secs;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	sbi->skipped_gc_rwsem = 0;
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	/* Let's run FG_GC, if we don't have enough space. */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		gc_type = FG_GC;

		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint. Then,
		 * we secure free segments which don't need FG_GC any more.
		 */
		if (prefree_segments(sbi)) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			/* Reset due to checkpoint */
			sec_freed = 0;
		}
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
	if (ret) {
		/* allow searching victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks,
				gc_control->one_time);
	if (seg_freed < 0)
		goto stop;

	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
		sec_freed++;
		total_sec_freed++;
	}

	if (gc_control->one_time)
		goto stop;

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;

		if (has_enough_free_secs(sbi, sec_freed, 0)) {
			if (!gc_control->no_bg_gc &&
			    total_sec_freed < gc_control->nr_free_secs)
				goto go_gc_more;
			goto stop;
		}
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	} else if (has_enough_free_secs(sbi, 0, 0)) {
		goto stop;
	}

	__get_secs_required(sbi, NULL, &upper_secs, NULL);

	/*
	 * Write a checkpoint to reclaim prefree segments.
	 * We need three extra sections for the writer's data/node/dentry.
	 */
	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
				prefree_segments(sbi)) {
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		/* Reset due to checkpoint */
		sec_freed = 0;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = total_sec_freed ? 0 : -EAGAIN;
	return ret;
}

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}
void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections)
{
	unsigned int segno;
	unsigned int gc_secs = dry_run_sections;

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
		put_gc_inode(&gc_list);

		if (!dry_run && get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
		if (dry_run && dry_run_sections &&
			!get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
			break;

		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
	}

	return 0;
}
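/*
 * free_segment_range() evacuates the tail of the main area before a
 * shrink: it temporarily drops MAIN_SECS() by "secs" so the allocator
 * cannot place new blocks there, resets any victim cursors pointing
 * into the range, relocates the current segments out of it, and then
 * runs FG_GC over the range until no valid blocks remain.
 */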
			next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

/* Apply a section-count delta to the on-disk superblock fields. */
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * SEGS_PER_SEC(sbi);

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)SEGS_TO_BLKS(sbi, segs));
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}

/* Apply the same section-count delta to the in-memory counters. */
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * SEGS_PER_SEC(sbi);
	long long blks = SEGS_TO_BLKS(sbi, segs);
	long long user_block_count =
			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
					div_u64(blks, sbi->blocks_per_blkz);
#endif
	}
}

int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	/* this path only supports shrinking the filesystem */
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		/* the shrunk range must fall inside the last device */
		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
								old_block_count)
			return -EINVAL;
	}

	/* the new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;
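
	/*
	 * Worked example (illustrative geometry, not read from the source):
	 * with 4 KiB blocks and 512-block segments, a one-segment section
	 * gives BLKS_PER_SEC(sbi) == 512.  Shrinking by 1 GiB is then
	 * shrunk_blocks = 262144, so secs = 262144 / 512 = 512 sections
	 * that must be emptied below before the size change can commit.
	 */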
	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	/* first pass: dry run to check that the range can be emptied */
	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
		if (err)
			return err;
		return -EROFS;
	}

	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	/* re-check the space condition now that writers are frozen */
	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	/* second pass: really move everything out of the shrunk range */
	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		/* roll both views back if the checkpoint cannot be written */
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	return err;
}
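
/*
 * Usage sketch (illustrative; assumes the F2FS_IOC_RESIZE_FS ioctl
 * exported through <linux/f2fs.h>): a CAP_SYS_ADMIN caller reaches
 * f2fs_resize_fs() with the new size in blocks via a file descriptor
 * on the mounted filesystem, e.g.:
 *
 *	__u64 new_blocks = ...;		// must stay section-aligned
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	ioctl(fd, F2FS_IOC_RESIZE_FS, &new_blocks);
 */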