// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "internal.h"
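/*
 * Global scratch buffers ("gbufs"): one buffer per CPU, capped by the
 * `global_buffers` parameter below.  Each buffer is vmap()ed from an
 * array of individually allocated pages so that it can be grown at
 * runtime via z_erofs_gbuf_growsize().  An optional trailing buffer
 * (sized by `reserved_pages`) is never vmap()ed; its pages are handed
 * out by __erofs_allocpage() before falling back to alloc_page().
 */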

struct z_erofs_gbuf {
	spinlock_t lock;
	void *ptr;
	struct page **pages;
	unsigned int nrpages;
};

static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf;
static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
	z_erofs_rsv_nrpages;

module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);
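/*
 * Both parameters are read-only at runtime (mode 0444), so they can only
 * be set at boot, e.g. on the kernel command line when EROFS is built in
 * (the values below are purely illustrative):
 *
 *	erofs.global_buffers=8 erofs.reserved_pages=64
 */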

atomic_long_t erofs_global_shrink_cnt;	/* for all mounted instances */

/* protects the mounted `erofs_sb_list` */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
static unsigned int shrinker_run_no;
static struct shrinker *erofs_shrinker_info;

static unsigned int z_erofs_gbuf_id(void)
{
	return raw_smp_processor_id() % z_erofs_gbuf_count;
}

void *z_erofs_get_gbuf(unsigned int requiredpages)
	__acquires(gbuf->lock)
{
	struct z_erofs_gbuf *gbuf;

	migrate_disable();
	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
	spin_lock(&gbuf->lock);
	/* check if the buffer is too small */
	if (requiredpages > gbuf->nrpages) {
		spin_unlock(&gbuf->lock);
		migrate_enable();
		/* (for sparse checker) pretend gbuf->lock is still taken */
		__acquire(gbuf->lock);
		return NULL;
	}
	return gbuf->ptr;
}

void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
{
	struct z_erofs_gbuf *gbuf;

	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
	DBG_BUGON(gbuf->ptr != ptr);
	spin_unlock(&gbuf->lock);
	migrate_enable();
}
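/*
 * Minimal usage sketch for the pair above (hypothetical caller, not part
 * of this file).  The returned buffer is only valid until
 * z_erofs_put_gbuf(): the per-CPU gbuf spinlock is held and CPU
 * migration is disabled in between, so the section must not sleep:
 *
 *	void *buf = z_erofs_get_gbuf(npages);
 *
 *	if (buf) {
 *		... use up to npages * PAGE_SIZE bytes of scratch space ...
 *		z_erofs_put_gbuf(buf);
 *	} else {
 *		... too small; grow via z_erofs_gbuf_growsize() and retry,
 *		    or fall back to a private allocation ...
 *	}
 */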

int z_erofs_gbuf_growsize(unsigned int nrpages)
{
	static DEFINE_MUTEX(gbuf_resize_mutex);
	struct page **tmp_pages = NULL;
	struct z_erofs_gbuf *gbuf;
	void *ptr, *old_ptr;
	int last, i, j;

	mutex_lock(&gbuf_resize_mutex);
	/* avoid shrinking gbufs, since no idea how many fses rely on */
	if (nrpages <= z_erofs_gbuf_nrpages) {
		mutex_unlock(&gbuf_resize_mutex);
		return 0;
	}

	for (i = 0; i < z_erofs_gbuf_count; ++i) {
		gbuf = &z_erofs_gbufpool[i];
		tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
		if (!tmp_pages)
			goto out;

		for (j = 0; j < gbuf->nrpages; ++j)
			tmp_pages[j] = gbuf->pages[j];
		do {
			last = j;
			j = alloc_pages_bulk(GFP_KERNEL, nrpages,
					     tmp_pages);
			if (last == j)
				goto out;
		} while (j != nrpages);

		ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
		if (!ptr)
			goto out;

		spin_lock(&gbuf->lock);
		kfree(gbuf->pages);
		gbuf->pages = tmp_pages;
		old_ptr = gbuf->ptr;
		gbuf->ptr = ptr;
		gbuf->nrpages = nrpages;
		spin_unlock(&gbuf->lock);
		if (old_ptr)
			vunmap(old_ptr);
	}
	z_erofs_gbuf_nrpages = nrpages;
out:
	if (i < z_erofs_gbuf_count && tmp_pages) {
		for (j = 0; j < nrpages; ++j)
			if (tmp_pages[j] && (j >= gbuf->nrpages ||
					     tmp_pages[j] != gbuf->pages[j]))
				__free_page(tmp_pages[j]);
		kfree(tmp_pages);
	}
	mutex_unlock(&gbuf_resize_mutex);
	return i < z_erofs_gbuf_count ? -ENOMEM : 0;
}
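/*
 * Note on the `out:` error path above: tmp_pages may share its first
 * gbuf->nrpages entries with the still-live gbuf->pages array, so the
 * cleanup only frees pages that were newly allocated for the failed
 * resize before freeing the temporary array itself.
 */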

int __init z_erofs_gbuf_init(void)
{
	unsigned int i, total = num_possible_cpus();

	if (z_erofs_gbuf_count)
		total = min(z_erofs_gbuf_count, total);
	z_erofs_gbuf_count = total;

	/* The last (special) global buffer is the reserved buffer */
	total += !!z_erofs_rsv_nrpages;

	z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool),
				   GFP_KERNEL);
	if (!z_erofs_gbufpool)
		return -ENOMEM;

	if (z_erofs_rsv_nrpages) {
		z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1];
		z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,
				sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);
		if (!z_erofs_rsvbuf->pages) {
			z_erofs_rsvbuf = NULL;
			z_erofs_rsv_nrpages = 0;
		}
	}
	for (i = 0; i < total; ++i)
		spin_lock_init(&z_erofs_gbufpool[i].lock);
	return 0;
}
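/*
 * Failing to allocate the reserved buffer's page array above is not
 * fatal: the reserved buffer is simply disabled and __erofs_allocpage()
 * falls back to plain page allocation.  Note that the array starts out
 * empty; it is filled lazily by erofs_release_pages() below, with
 * z_erofs_rsv_nrpages acting as its capacity.
 */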

void z_erofs_gbuf_exit(void)
{
	int i, j;

	for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
		struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];

		if (gbuf->ptr) {
			vunmap(gbuf->ptr);
			gbuf->ptr = NULL;
		}

		if (!gbuf->pages)
			continue;

		for (j = 0; j < gbuf->nrpages; ++j)
			if (gbuf->pages[j])
				put_page(gbuf->pages[j]);
		kfree(gbuf->pages);
		gbuf->pages = NULL;
	}
	kfree(z_erofs_gbufpool);
}
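/*
 * The loop above intentionally runs one slot past z_erofs_gbuf_count when
 * a reserved buffer exists: the reserved buffer occupies the last pool
 * slot and its donated pages are released the same way.
 */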

struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv)
{
	struct page *page = *pagepool;

	if (page) {
		*pagepool = (struct page *)page_private(page);
	} else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) {
		spin_lock(&z_erofs_rsvbuf->lock);
		if (z_erofs_rsvbuf->nrpages)
			page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages];
		spin_unlock(&z_erofs_rsvbuf->lock);
	}
	if (!page)
		page = alloc_page(gfp);
	DBG_BUGON(page && page_ref_count(page) != 1);
	return page;
}
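/*
 * Pagepools here are singly-linked lists of free pages chained through
 * page_private(): *pagepool points at the head and each page's ->private
 * field holds the next page (or NULL).  A minimal sketch of how a caller
 * could push a page onto such a pool (hypothetical helper, not part of
 * this file):
 *
 *	static void erofs_pagepool_add(struct page **pagepool,
 *				       struct page *page)
 *	{
 *		set_page_private(page, (unsigned long)*pagepool);
 *		*pagepool = page;
 *	}
 */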

void erofs_release_pages(struct page **pagepool)
{
	while (*pagepool) {
		struct page *page = *pagepool;

		*pagepool = (struct page *)page_private(page);
		/* try to fill reserved global pool first */
		if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages <
				z_erofs_rsv_nrpages) {
			spin_lock(&z_erofs_rsvbuf->lock);
			if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) {
				z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
						= page;
				spin_unlock(&z_erofs_rsvbuf->lock);
				continue;
			}
			spin_unlock(&z_erofs_rsvbuf->lock);
		}
		put_page(page);
	}
}

void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	while (!xa_empty(&sbi->managed_pslots)) {
		z_erofs_shrink_scan(sbi, ~0UL);
		cond_resched();
	}
	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}

static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt) ?: SHRINK_EMPTY;
}
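/*
 * Returning SHRINK_EMPTY instead of 0 when the global count is zero
 * tells the shrinker core that there is nothing to scan at all, so
 * erofs_shrink_scan() is skipped entirely for this round.
 */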

static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;
		freed += z_erofs_shrink_scan(sbi, nr - freed);
		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}
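/*
 * Note the locking pattern above: erofs_sb_list_lock is dropped while an
 * individual superblock is being shrunk (its umount_mutex keeps the sbi
 * alive meanwhile), and visited entries are rotated to the list tail so
 * repeated runs spread the reclaim work fairly across mounts.
 */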

int __init erofs_init_shrinker(void)
{
	erofs_shrinker_info = shrinker_alloc(0, "erofs-shrinker");
	if (!erofs_shrinker_info)
		return -ENOMEM;

	erofs_shrinker_info->count_objects = erofs_shrink_count;
	erofs_shrinker_info->scan_objects = erofs_shrink_scan;
	shrinker_register(erofs_shrinker_info);
	return 0;
}
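/*
 * This uses the dynamic shrinker API: the callbacks are filled in between
 * shrinker_alloc() and shrinker_register(), and shrinker_free() below
 * both unregisters and frees the shrinker on exit.
 */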

void erofs_exit_shrinker(void)
{
	shrinker_free(erofs_shrinker_info);
}