--- gc.c (1724c7c0c9494dcbdd7f630f29e1e8427cb231d1)
+++ gc.c (d9872a698c393e0d1abca86bf05b62712cbfc581)
 /*
  * fs/f2fs/gc.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
--- 21 unchanged lines hidden ---
 	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
 	long wait_ms;

 	wait_ms = gc_th->min_sleep_time;

 	set_freezable();
 	do {
 		wait_event_interruptible_timeout(*wq,
-				kthread_should_stop() || freezing(current),
+				kthread_should_stop() || freezing(current) ||
+				gc_th->gc_wake,
 				msecs_to_jiffies(wait_ms));

+		/* give it a try one time */
+		if (gc_th->gc_wake)
+			gc_th->gc_wake = 0;
+
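
Note: the new gc_wake flag is only one half of a handshake; something else
must set it and wake gc_wait_queue_head, or the added wait condition never
fires. That producer side lands outside this file (in the companion sysfs
change). A minimal sketch of what it plausibly looks like: the helper name
is hypothetical, and only the flag/wait-queue pairing is taken from this
diff.

	#include "f2fs.h"	/* struct f2fs_sb_info, struct f2fs_gc_kthread */

	/* Hypothetical helper: cut the GC thread's current nap short. */
	static void f2fs_kick_gc_thread(struct f2fs_sb_info *sbi)
	{
		struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

		if (!gc_th)
			return;
		gc_th->gc_wake = 1;	/* consumed (reset to 0) by gc_thread_func() */
		wake_up_interruptible_all(&gc_th->gc_wait_queue_head);
	}
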
 		if (try_to_freeze())
 			continue;
 		if (kthread_should_stop())
 			break;

 		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
 			increase_sleep_time(gc_th, &wait_ms);
 			continue;
 		}

 #ifdef CONFIG_F2FS_FAULT_INJECTION
 		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
 			f2fs_show_injection_info(FAULT_CHECKPOINT);
 			f2fs_stop_checkpoint(sbi, false);
 		}
 #endif

+		if (!sb_start_write_trylock(sbi->sb))
+			continue;
+
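
Note: sb_start_write_trylock() takes a shared reference on sb->s_writers,
so a freeze cannot reach SB_FREEZE_WRITE while a GC round is in flight; if
the superblock is already freezing, the thread simply skips the round. The
reference must be dropped on every path out of the iteration, which is why
later hunks turn "continue" into "goto next" (the new label that calls
sb_end_write()). The pattern in isolation, as an illustrative sketch rather
than f2fs code:

	#include <linux/fs.h>

	static void gc_one_round(struct super_block *sb)
	{
		if (!sb_start_write_trylock(sb))
			return;		/* frozen or freezing: retry on next wakeup */
		/* ... issue GC writes; a freeze must wait for us ... */
		sb_end_write(sb);	/* every exit path must reach this */
	}
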
 		/*
 		 * [GC triggering condition]
 		 * 0. GC is not conducted currently.
 		 * 1. There are enough dirty segments.
 		 * 2. IO subsystem is idle by checking the # of writeback pages.
 		 * 3. IO subsystem is idle by checking the # of requests in
 		 *    bdev's request list.
 		 *
 		 * Note) We have to avoid triggering GCs frequently.
 		 * Because it is possible that some segments can be
 		 * invalidated soon after by user update or deletion.
 		 * So, I'd like to wait some time to collect dirty segments.
 		 */
 		if (!mutex_trylock(&sbi->gc_mutex))
-			continue;
+			goto next;

+		if (gc_th->gc_urgent) {
+			wait_ms = gc_th->urgent_sleep_time;
+			goto do_gc;
+		}
+
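
Note: with gc_urgent set, the thread bypasses both the idle check and the
adaptive sleep adjustment below: it naps a fixed urgent_sleep_time and
jumps straight to do_gc. A hypothetical condensation of the resulting
policy, as a reading aid only (the real back-off arithmetic lives in
increase_sleep_time()/decrease_sleep_time() in gc.h and is not reproduced
here):

	/* Condensed decision: when does a wakeup actually run GC? */
	static bool should_run_gc(struct f2fs_gc_kthread *gc_th, bool dev_idle)
	{
		if (gc_th->gc_urgent)	/* user demands aggressive cleaning */
			return true;	/* ...after only urgent_sleep_time ms */
		return dev_idle;	/* otherwise only while the device is idle */
	}
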
 		if (!is_idle(sbi)) {
 			increase_sleep_time(gc_th, &wait_ms);
 			mutex_unlock(&sbi->gc_mutex);
-			continue;
+			goto next;
 		}

 		if (has_enough_invalid_blocks(sbi))
 			decrease_sleep_time(gc_th, &wait_ms);
 		else
 			increase_sleep_time(gc_th, &wait_ms);
-
+do_gc:
 		stat_inc_bggc_count(sbi);

 		/* if return value is not zero, no victim was selected */
 		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
 			wait_ms = gc_th->no_gc_sleep_time;

 		trace_f2fs_background_gc(sbi->sb, wait_ms,
 				prefree_segments(sbi), free_segments(sbi));

 		/* balancing f2fs's metadata periodically */
 		f2fs_balance_fs_bg(sbi);
+next:
+		sb_end_write(sbi->sb);

 	} while (!kthread_should_stop());
 	return 0;
 }

 int start_gc_thread(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_gc_kthread *gc_th;
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
 	int err = 0;

 	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
 	if (!gc_th) {
 		err = -ENOMEM;
 		goto out;
 	}

+	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
 	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
 	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
 	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

 	gc_th->gc_idle = 0;
+	gc_th->gc_urgent = 0;
+	gc_th->gc_wake = 0;
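
Note: for reference, the knobs initialized above live in struct
f2fs_gc_kthread (gc.h). The layout below is reconstructed from this diff
plus the surrounding f2fs tree of the same era, so field order and defaults
are recalled rather than shown here (DEF_GC_THREAD_URGENT_SLEEP_TIME is
500 ms, against 30/60/300 s for min/max/no-GC sleep):

	struct f2fs_gc_kthread {
		struct task_struct *f2fs_gc_task;
		wait_queue_head_t gc_wait_queue_head;

		/* sleep intervals, in milliseconds */
		unsigned int urgent_sleep_time;	/* new: nap length in urgent mode */
		unsigned int min_sleep_time;
		unsigned int max_sleep_time;
		unsigned int no_gc_sleep_time;

		/* GC mode switches */
		unsigned int gc_idle;
		unsigned int gc_urgent;	/* new: skip idle checks, clean hard */
		unsigned int gc_wake;	/* new: one-shot "wake up now" flag */
	};
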

 	sbi->gc_thread = gc_th;
 	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
 	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
 			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(gc_th->f2fs_gc_task)) {
 		err = PTR_ERR(gc_th->f2fs_gc_task);
 		kfree(gc_th);
--- 451 unchanged lines hidden ---
 	if (sum->version != dni->version) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
 				"%s: valid data with mismatched node version.",
 				__func__);
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 	}

 	*nofs = ofs_of_node(node_page);
-	source_blkaddr = datablock_addr(node_page, ofs_in_node);
+	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
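
Note: datablock_addr() grows a leading struct inode * argument in this
range. The likely driver is the contemporaneous extra-attribute work:
inodes may now reserve a variable i_extra_isize area, so the data block
address array no longer starts at a fixed offset inside an inode node, and
the helper needs the inode (when one is available) to find the base.
is_alive() passes NULL because the GC path only holds the node page. A
rough reconstruction of the helper, recalled rather than copied from
f2fs.h, so treat names like offset_in_addr() as approximate:

	static inline block_t datablock_addr(struct inode *inode,
			struct page *node_page, unsigned int offset)
	{
		struct f2fs_node *raw_node = F2FS_NODE(node_page);
		int base = 0;

		if (IS_INODE(node_page)) {
			if (!inode)	/* GC path: no VFS inode at hand */
				base = offset_in_addr(&raw_node->i);
			else if (f2fs_has_extra_attr(inode))
				base = get_extra_isize(inode);
		}
		return le32_to_cpu(blkaddr_in_node(raw_node)[base + offset]);
	}
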
 	f2fs_put_page(node_page, 1);

 	if (source_blkaddr != blkaddr)
 		return false;
 	return true;
 }

 static void move_encrypted_block(struct inode *inode, block_t bidx,
--- 85 unchanged lines hidden ---
 	/* allocate block address */
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

 	fio.op = REQ_OP_WRITE;
 	fio.op_flags = REQ_SYNC;
 	fio.new_blkaddr = newaddr;
 	f2fs_submit_page_write(&fio);

+	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
+
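
Note: f2fs_update_iostat() belongs to the app/fs I/O accounting series that
also lands in this range. The encrypted-block GC path submits its bio
directly, so it charges one block (F2FS_BLKSIZE) to the FS_GC_DATA_IO
bucket by hand; the buffered counterpart below instead tags its
f2fs_io_info with .io_type = FS_GC_DATA_IO and lets the common write path
do the accounting. A hedged sketch of the helper's shape, reconstructed
rather than copied from f2fs.h:

	/* Approximate: per-iostat_type byte counters, opt-in via sysfs. */
	static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
	{
		if (!sbi->iostat_enable)
			return;
		spin_lock(&sbi->iostat_lock);
		sbi->write_iostat[type] += io_bytes;
		spin_unlock(&sbi->iostat_lock);
	}
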
 	f2fs_update_data_blkaddr(&dn, newaddr);
 	set_inode_flag(inode, FI_APPEND_WRITE);
 	if (page->index == 0)
 		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
 put_page_out:
 	f2fs_put_page(fio.encrypted_page, 1);
 recover_block:
 	if (err)
--- 31 unchanged lines hidden ---
 		.type = DATA,
 		.temp = COLD,
 		.op = REQ_OP_WRITE,
 		.op_flags = REQ_SYNC,
 		.old_blkaddr = NULL_ADDR,
 		.page = page,
 		.encrypted_page = NULL,
 		.need_lock = LOCK_REQ,
+		.io_type = FS_GC_DATA_IO,
 	};
 	bool is_dirty = PageDirty(page);
 	int err;

 retry:
 	set_page_dirty(page);
 	f2fs_wait_on_page_writeback(page, DATA, true);
 	if (clear_page_dirty_for_io(page)) {
--- 318 unchanged lines hidden ---