// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_freezable_timeout(*wq,
				kthread_should_stop() ||
					waitqueue_active(fggc_wq) ||
					gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
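		/*
		 * Note: if GC_MERGE turned this pass into foreground GC on
		 * behalf of a sleeper on fggc_wq, that sleeper is woken up
		 * via wake_up_all(&gc_th->fggc_wq) once f2fs_gc() below
		 * completes.
		 */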
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC is triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't bother wait_ms by foreground gc */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust candidates range, should select all dirty segments for
	 * foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first. */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = BLKS_TO_SEGS(sbi, vblocks * 100);

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);
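	/*
	 * Note: the allocation above passes nofail == true, so
	 * f2fs_kmem_cache_alloc() is not expected to return NULL here
	 * (__GFP_NOFAIL semantics); hence no NULL check below.
	 */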
	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < SEGS_PER_SEC(sbi); i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, SEGS_PER_SEC(sbi));

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}

static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
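	/*
	 * The age term above and the utilization term below share the same
	 * "accu" scale, so cost = UINT_MAX - (age + u) weighs data age
	 * against reclaimable space at an age_weight : (100 - age_weight)
	 * ratio; the candidate with the smallest cost (oldest and emptiest
	 * section) wins.
	 */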

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
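	/*
	 * Two-stage neighbourhood scan: look up the entry closest to the
	 * requested age (p->age), walk one direction from it (rb_prev) in
	 * stage 0, then restart from the lookup point and walk the other
	 * direction (rb_next) in stage 1, visiting at most dirty_threshold
	 * candidates per stage.
	 */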
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
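/*
 * In short (derived from the body below): returns 0 with the victim stored
 * in *result; -ENODATA when no victim could be selected; -EBUSY when the
 * caller-requested segment is in use (a current segment or one already
 * under GC).
 */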
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting the invalid segno (that is failed due to block
		 * validity check failure during GC) to avoid endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find target segment
				 * for writes which can be full by checkpointed
				 * and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * address in the NAT. If they match, the node is valid and is migrated with
 * cold status; otherwise the invalid node is ignored.
 */
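/*
 * gc_node_segment() below makes three passes over the victim's summary
 * entries: phase 0 readaheads the NAT pages covering each nid, phase 1
 * readaheads the node pages themselves, and phase 2 re-validates each
 * block and migrates it via f2fs_move_node_page().
 */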
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: callers must pass only node offsets that indicate direct node
 * blocks. Passing an offset that points to another type of node block, such
 * as an indirect or double indirect node block, is a caller's bug.
 */
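/*
 * Worked example (with NIDS_PER_BLOCK == N): node offset 0 is the inode
 * itself, offsets 1 and 2 are the two direct nodes, offset 3 is the first
 * indirect node, and offset 4 is the first direct node referenced by that
 * indirect node. For node_ofs == 4 the middle branch below computes
 * dec = (4 - 4) / (N + 1) = 0 and bidx = 4 - 2 - 0 = 2, i.e. the third
 * data-bearing node block, giving a start index of
 * 2 * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode).
 */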
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data were writebacked to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
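	/*
	 * Per the ternary above: with ATGC enabled, non-urgent background GC
	 * writes into the dedicated CURSEG_ALL_DATA_ATGC log so blocks of
	 * similar age can stay together; everything else falls back to the
	 * cold data log.
	 */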

	/* do not read out */
	page = f2fs_grab_cache_page(mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data were writebacked to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);
	if (err) {
		f2fs_put_page(mpage, 1);
		/* filesystem should shut down, no need to recover the block */
		goto up_out;
	}

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);

	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (folio_test_writeback(page_folio(page))) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function fetches the parent node of each victim data block and checks
 * the block's validity. A valid block is copied with cold status and its
 * parent node is updated.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

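	/*
	 * Five passes over the summary entries: phase 0 readaheads the NAT
	 * pages, phase 1 readaheads the dnode pages, phase 2 readaheads the
	 * owning inodes' node pages, phase 3 igets each inode and readaheads
	 * its data page, and phase 4 actually moves the data via
	 * move_data_block()/move_data_page().
	 */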
next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there are not enough free sections.
		 * Or stop GC if the segment became fully valid due to a race
		 * with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			if (is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				iput(inode);
				continue;
			}

			if (f2fs_has_inline_data(inode)) {
				iput(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				f2fs_err_ratelimited(sbi,
					"inode %lx has both inline_data flag and "
					"data block, nid=%u, ofs_in_node=%u",
					inode->i_ino, dni.nid, ofs_in_node);
				continue;
			}

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_meta_inode_gc_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_meta_inode_gc_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_meta_inode_gc_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

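/*
 * Victim selection reads SIT entries, so it is serialized against SIT
 * updates by taking sentry_lock for writing around f2fs_get_victim().
 */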
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));

	/*
	 * zone-capacity can be less than zone-size in zoned devices,
	 * resulting in less than expected usable segments in the zone,
	 * calculate the end segno in the zone which can be garbage collected
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= SEGS_PER_SEC(sbi) -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multi ssa blocks those have contiguous address */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary page */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_gc_seg_count(sbi, data_type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi, data_type);

	blk_finish_plug(&plug);

	if (migrated)
		stat_inc_gc_sec_count(sbi, data_type, gc_type);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;
	unsigned int upper_secs;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	sbi->skipped_gc_rwsem = 0;
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	/* Let's run FG_GC, if we don't have enough space. */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		gc_type = FG_GC;

		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by writing a
		 * checkpoint. Then we secure free segments that no longer
		 * need FG_GC.
		 */
		if (prefree_segments(sbi)) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			/* Reset due to checkpoint */
			sec_freed = 0;
		}
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret) {
		/* allow searching for victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks);
	if (seg_freed < 0)
		goto stop;

	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
		sec_freed++;
		total_sec_freed++;
	}

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;

		if (has_enough_free_secs(sbi, sec_freed, 0)) {
			if (!gc_control->no_bg_gc &&
			    total_sec_freed < gc_control->nr_free_secs)
				goto go_gc_more;
			goto stop;
		}
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	} else if (has_enough_free_secs(sbi, 0, 0)) {
		goto stop;
	}

	__get_secs_required(sbi, NULL, &upper_secs, NULL);

	/*
	 * Write checkpoint to reclaim prefree segments.
	 * We need three more extra sections for the writer's data/node/dentry.
	 */
	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
				prefree_segments(sbi)) {
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		/* Reset due to checkpoint */
		sec_freed = 0;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = total_sec_freed ? 0 : -EAGAIN;
	return ret;
}

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections)
{
	unsigned int segno;
	unsigned int gc_secs = dry_run_sections;

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC,
				dry_run_sections == 0);
		put_gc_inode(&gc_list);

		if (!dry_run && get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
		if (dry_run && dry_run_sections &&
			!get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
			break;

		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
	}

	return 0;
}

static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool dry_run)
{
	unsigned int next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
		if (err)
			goto out;
	}

	/* do GC to move out valid blocks in the range */
	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
	if (err || dry_run)
		goto out;

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
inuse!", 2074 next_inuse); 2075 f2fs_bug_on(sbi, 1); 2076 } 2077 out: 2078 MAIN_SECS(sbi) += secs; 2079 return err; 2080 } 2081 2082 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) 2083 { 2084 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); 2085 int section_count; 2086 int segment_count; 2087 int segment_count_main; 2088 long long block_count; 2089 int segs = secs * SEGS_PER_SEC(sbi); 2090 2091 f2fs_down_write(&sbi->sb_lock); 2092 2093 section_count = le32_to_cpu(raw_sb->section_count); 2094 segment_count = le32_to_cpu(raw_sb->segment_count); 2095 segment_count_main = le32_to_cpu(raw_sb->segment_count_main); 2096 block_count = le64_to_cpu(raw_sb->block_count); 2097 2098 raw_sb->section_count = cpu_to_le32(section_count + secs); 2099 raw_sb->segment_count = cpu_to_le32(segment_count + segs); 2100 raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs); 2101 raw_sb->block_count = cpu_to_le64(block_count + 2102 (long long)SEGS_TO_BLKS(sbi, segs)); 2103 if (f2fs_is_multi_device(sbi)) { 2104 int last_dev = sbi->s_ndevs - 1; 2105 int dev_segs = 2106 le32_to_cpu(raw_sb->devs[last_dev].total_segments); 2107 2108 raw_sb->devs[last_dev].total_segments = 2109 cpu_to_le32(dev_segs + segs); 2110 } 2111 2112 f2fs_up_write(&sbi->sb_lock); 2113 } 2114 2115 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) 2116 { 2117 int segs = secs * SEGS_PER_SEC(sbi); 2118 long long blks = SEGS_TO_BLKS(sbi, segs); 2119 long long user_block_count = 2120 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count); 2121 2122 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; 2123 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs; 2124 MAIN_SECS(sbi) += secs; 2125 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; 2126 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; 2127 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks); 2128 2129 if (f2fs_is_multi_device(sbi)) { 2130 int last_dev = sbi->s_ndevs - 1; 2131 2132 FDEV(last_dev).total_segments = 2133 (int)FDEV(last_dev).total_segments + segs; 2134 FDEV(last_dev).end_blk = 2135 (long long)FDEV(last_dev).end_blk + blks; 2136 #ifdef CONFIG_BLK_DEV_ZONED 2137 FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz + 2138 div_u64(blks, sbi->blocks_per_blkz); 2139 #endif 2140 } 2141 } 2142 2143 int f2fs_resize_fs(struct file *filp, __u64 block_count) 2144 { 2145 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); 2146 __u64 old_block_count, shrunk_blocks; 2147 struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; 2148 unsigned int secs; 2149 int err = 0; 2150 __u32 rem; 2151 2152 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count); 2153 if (block_count > old_block_count) 2154 return -EINVAL; 2155 2156 if (f2fs_is_multi_device(sbi)) { 2157 int last_dev = sbi->s_ndevs - 1; 2158 __u64 last_segs = FDEV(last_dev).total_segments; 2159 2160 if (block_count + SEGS_TO_BLKS(sbi, last_segs) <= 2161 old_block_count) 2162 return -EINVAL; 2163 } 2164 2165 /* new fs size should align to section size */ 2166 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem); 2167 if (rem) 2168 return -EINVAL; 2169 2170 if (block_count == old_block_count) 2171 return 0; 2172 2173 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { 2174 f2fs_err(sbi, "Should run fsck to repair first."); 2175 return -EFSCORRUPTED; 2176 } 2177 2178 if (test_opt(sbi, DISABLE_CHECKPOINT)) { 2179 f2fs_err(sbi, "Checkpoint should be enabled."); 2180 return -EINVAL; 2181 } 2182 2183 err = mnt_want_write_file(filp); 2184 if (err) 2185 return 
int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
		if (err)
			return err;
		return -EROFS;
	}

	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	return err;
}
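
/*
 * Usage sketch (illustrative userspace snippet, not part of this file):
 * f2fs_resize_fs() above is reached through the F2FS_IOC_RESIZE_FS ioctl,
 * which passes a pointer to the new block count. Only shrinking is
 * supported; growing and section-unaligned sizes are rejected with -EINVAL.
 *
 *	__u64 blocks = ...;			// section-aligned, <= current size
 *	int fd = open("/mnt/f2fs", O_RDONLY);	// any file on the f2fs mount
 *	if (ioctl(fd, F2FS_IOC_RESIZE_FS, &blocks) < 0)
 *		perror("F2FS_IOC_RESIZE_FS");
 */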