// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

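/* Number of clean NAT cache entries that can be reclaimed from @sbi. */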
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
}

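/*
 * Number of cached free nids beyond MAX_FREE_NIDS; only the excess is
 * considered reclaimable.
 */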
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}

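/*
 * Number of reclaimable objects in the extent cache of the given type:
 * zombie extent trees plus extent nodes.
 */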
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi,
					enum extent_type type)
{
	struct extent_tree_info *eti = &sbi->extent_tree[type];

	return atomic_read(&eti->total_zombie_tree) +
				atomic_read(&eti->total_ext_node);
}

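/*
 * Report the total number of reclaimable objects (extent cache entries,
 * clean NAT entries and excess free nids) across all mounted f2fs
 * instances, skipping any instance in the middle of f2fs_put_super().
 * Returns SHRINK_EMPTY when there is nothing to reclaim.
 */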
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count read extent cache entries */
		count += __count_extent_cache(sbi, EX_READ);

		/* count block age extent cache entries */
		count += __count_extent_cache(sbi, EX_BLOCK_AGE);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count ?: SHRINK_EMPTY;
}

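/*
 * Reclaim up to sc->nr_to_scan objects, walking the mounted f2fs
 * instances round-robin.  Each visited instance is stamped with the
 * current run number and moved to the list tail, so a later run starts
 * with the least recently shrunk instance; the walk stops early once an
 * instance has already been visited in this run or enough objects have
 * been freed.
 */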
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink block age extent cache entries */
		freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2);

		/* shrink read extent cache entries */
		freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

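/*
 * Sum the number of donated files over all mounted f2fs instances,
 * skipping any instance that is currently being unmounted.
 */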
unsigned int f2fs_donate_files(void)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int donate_files = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		donate_files += sbi->donate_files;

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);

	return donate_files;
}

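/*
 * Invalidate page cache over the donated ranges of @sbi's donated inodes
 * until roughly @reclaim_caches_kb kilobytes have been dropped or every
 * donated inode has been visited once.  Returns the amount, in KB, still
 * left to reclaim.
 */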
static unsigned int do_reclaim_caches(struct f2fs_sb_info *sbi,
				unsigned int reclaim_caches_kb)
{
	struct inode *inode;
	struct f2fs_inode_info *fi;
	unsigned int nfiles = sbi->donate_files;
	pgoff_t npages = reclaim_caches_kb >> (PAGE_SHIFT - 10);

	while (npages && nfiles--) {
		pgoff_t len;

		spin_lock(&sbi->inode_lock[DONATE_INODE]);
		if (list_empty(&sbi->inode_list[DONATE_INODE])) {
			spin_unlock(&sbi->inode_lock[DONATE_INODE]);
			break;
		}
		fi = list_first_entry(&sbi->inode_list[DONATE_INODE],
					struct f2fs_inode_info, gdonate_list);
		list_move_tail(&fi->gdonate_list, &sbi->inode_list[DONATE_INODE]);
		inode = igrab(&fi->vfs_inode);
		spin_unlock(&sbi->inode_lock[DONATE_INODE]);

		if (!inode)
			continue;

		inode_lock(inode);
		if (!is_inode_flag_set(inode, FI_DONATE_FINISHED)) {
			len = fi->donate_end - fi->donate_start + 1;
			npages = npages < len ? 0 : npages - len;

			invalidate_inode_pages2_range(inode->i_mapping,
					fi->donate_start, fi->donate_end);
			set_inode_flag(inode, FI_DONATE_FINISHED);
		}
		inode_unlock(inode);

		iput(inode);
		cond_resched();
	}
	return npages << (PAGE_SHIFT - 10);
}

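/*
 * Reclaim up to @reclaim_caches_kb kilobytes of donated page cache,
 * walking the mounted f2fs instances until the budget is spent.
 * Instances that are being unmounted are skipped.
 */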
void f2fs_reclaim_caches(unsigned int reclaim_caches_kb)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list && reclaim_caches_kb) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		reclaim_caches_kb = do_reclaim_caches(sbi, reclaim_caches_kb);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
}

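/* Add @sbi to the global list so the shrinker can see it. */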
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

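/*
 * Remove @sbi from the global list, dropping all of its extent cache
 * entries first so nothing is left for the shrinker to find.
 */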
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ));
	f2fs_shrink_age_extent_tree(sbi,
				__count_extent_cache(sbi, EX_BLOCK_AGE));

	spin_lock(&f2fs_list_lock);
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}
247