xref: /linux/fs/erofs/zutil.c (revision 643e2e259c2b25a2af0ae4c23c6e16586d9fd19c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "internal.h"

struct z_erofs_gbuf {
	spinlock_t lock;
	void *ptr;
	struct page **pages;
	unsigned int nrpages;
};

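/*
 * One global buffer per (possible) CPU by default, plus one optional
 * trailing buffer backing the reserved page pool; both counts can be
 * tuned through the module parameters below.
 */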
static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf;
static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
		z_erofs_rsv_nrpages;

module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);

atomic_long_t erofs_global_shrink_cnt;	/* for all mounted instances */

/* protects `shrinker_run_no` and the mounted `erofs_sb_list` */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
static unsigned int shrinker_run_no;
static struct shrinker *erofs_shrinker_info;

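/* map the calling CPU to a gbuf slot (migration is disabled across get/put) */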
static unsigned int z_erofs_gbuf_id(void)
{
	return raw_smp_processor_id() % z_erofs_gbuf_count;
}

void *z_erofs_get_gbuf(unsigned int requiredpages)
	__acquires(gbuf->lock)
{
	struct z_erofs_gbuf *gbuf;

	migrate_disable();
	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
	spin_lock(&gbuf->lock);
	/* check if the buffer is too small */
	if (requiredpages > gbuf->nrpages) {
		spin_unlock(&gbuf->lock);
		migrate_enable();
		/* (for sparse checker) pretend gbuf->lock is still taken */
		__acquire(gbuf->lock);
		return NULL;
	}
	return gbuf->ptr;
}

void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
{
	struct z_erofs_gbuf *gbuf;

	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
	DBG_BUGON(gbuf->ptr != ptr);
	spin_unlock(&gbuf->lock);
	migrate_enable();
}
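
/*
 * Illustrative sketch (not part of the original file): how a caller such
 * as a decompressor might borrow a per-CPU global buffer.  The function
 * below is hypothetical and only demonstrates the get/put contract; the
 * real users live in the decompressor code.
 */
static int __maybe_unused z_erofs_gbuf_usage_sketch(unsigned int nrpages)
{
	void *dst;

	/* takes gbuf->lock and disables migration on success */
	dst = z_erofs_get_gbuf(nrpages);
	if (!dst)		/* pool too small; caller must fall back */
		return -ENOMEM;

	/* the buffer is virtually contiguous over `nrpages` pages */
	memset(dst, 0, nrpages * PAGE_SIZE);

	/* drops gbuf->lock and re-enables migration */
	z_erofs_put_gbuf(dst);
	return 0;
}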

int z_erofs_gbuf_growsize(unsigned int nrpages)
{
	static DEFINE_MUTEX(gbuf_resize_mutex);
	struct page **tmp_pages = NULL;
	struct z_erofs_gbuf *gbuf;
	void *ptr, *old_ptr;
	int last, i, j;

	mutex_lock(&gbuf_resize_mutex);
	/* never shrink gbufs, since we can't tell how many fses rely on the current size */
	if (nrpages <= z_erofs_gbuf_nrpages) {
		mutex_unlock(&gbuf_resize_mutex);
		return 0;
	}

	for (i = 0; i < z_erofs_gbuf_count; ++i) {
		gbuf = &z_erofs_gbufpool[i];
		tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
		if (!tmp_pages)
			goto out;

		for (j = 0; j < gbuf->nrpages; ++j)
			tmp_pages[j] = gbuf->pages[j];
		do {
			last = j;
			j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
						   tmp_pages);
			if (last == j)
				goto out;
		} while (j != nrpages);

		ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
		if (!ptr)
			goto out;

		spin_lock(&gbuf->lock);
		kfree(gbuf->pages);
		gbuf->pages = tmp_pages;
		old_ptr = gbuf->ptr;
		gbuf->ptr = ptr;
		gbuf->nrpages = nrpages;
		spin_unlock(&gbuf->lock);
		if (old_ptr)
			vunmap(old_ptr);
	}
	z_erofs_gbuf_nrpages = nrpages;
out:
	if (i < z_erofs_gbuf_count && tmp_pages) {
		for (j = 0; j < nrpages; ++j)
			if (tmp_pages[j] && (j >= gbuf->nrpages ||
					     tmp_pages[j] != gbuf->pages[j]))
				__free_page(tmp_pages[j]);
		kfree(tmp_pages);
	}
	mutex_unlock(&gbuf_resize_mutex);
	return i < z_erofs_gbuf_count ? -ENOMEM : 0;
}
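
/*
 * Illustrative sketch (not part of the original file): a mount-time path
 * would typically grow the pool once the largest buffer size it may need
 * is known.  Since the pool never shrinks, this is a cheap no-op whenever
 * the pool is already big enough.  `max_pages` is a hypothetical value
 * derived from the on-disk configuration.
 */
static int __maybe_unused z_erofs_gbuf_grow_sketch(unsigned int max_pages)
{
	return z_erofs_gbuf_growsize(max_pages);	/* 0 or -ENOMEM */
}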

int __init z_erofs_gbuf_init(void)
{
	unsigned int i, total = num_possible_cpus();

	if (z_erofs_gbuf_count)
		total = min(z_erofs_gbuf_count, total);
	z_erofs_gbuf_count = total;

	/* The last (special) global buffer is the reserved buffer */
	total += !!z_erofs_rsv_nrpages;

	z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool),
				   GFP_KERNEL);
	if (!z_erofs_gbufpool)
		return -ENOMEM;

	if (z_erofs_rsv_nrpages) {
		z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1];
		z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,
				sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);
		if (!z_erofs_rsvbuf->pages) {
			z_erofs_rsvbuf = NULL;
			z_erofs_rsv_nrpages = 0;
		}
	}
	for (i = 0; i < total; ++i)
		spin_lock_init(&z_erofs_gbufpool[i].lock);
	return 0;
}

void z_erofs_gbuf_exit(void)
{
	int i, j;

	for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
		struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];

		if (gbuf->ptr) {
			vunmap(gbuf->ptr);
			gbuf->ptr = NULL;
		}

		if (!gbuf->pages)
			continue;

		for (j = 0; j < gbuf->nrpages; ++j)
			if (gbuf->pages[j])
				put_page(gbuf->pages[j]);
		kfree(gbuf->pages);
		gbuf->pages = NULL;
	}
	kfree(z_erofs_gbufpool);
}

struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv)
{
	struct page *page = *pagepool;

	if (page) {
		*pagepool = (struct page *)page_private(page);
	} else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) {
		spin_lock(&z_erofs_rsvbuf->lock);
		if (z_erofs_rsvbuf->nrpages)
			page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages];
		spin_unlock(&z_erofs_rsvbuf->lock);
	}
	if (!page)
		page = alloc_page(gfp);
	DBG_BUGON(page && page_ref_count(page) != 1);
	return page;
}

void erofs_release_pages(struct page **pagepool)
{
	while (*pagepool) {
		struct page *page = *pagepool;

		*pagepool = (struct page *)page_private(page);
		/* try to fill reserved global pool first */
		if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages <
				z_erofs_rsv_nrpages) {
			spin_lock(&z_erofs_rsvbuf->lock);
			if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) {
				z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
						= page;
				spin_unlock(&z_erofs_rsvbuf->lock);
				continue;
			}
			spin_unlock(&z_erofs_rsvbuf->lock);
		}
		put_page(page);
	}
}

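/*
 * Illustrative sketch (not part of the original file): a `pagepool` is a
 * NULL-terminated, singly-linked list of spare pages chained through
 * page_private().  The function below is hypothetical and only shows the
 * intended alloc/recycle/drain pattern.
 */
static void __maybe_unused erofs_pagepool_sketch(void)
{
	struct page *pagepool = NULL, *page;

	/* takes from the local pool, then the reserved pool, then the allocator */
	page = __erofs_allocpage(&pagepool, GFP_KERNEL, true);
	if (!page)
		return;

	/* done with the page: push it back onto the local pool */
	set_page_private(page, (unsigned long)pagepool);
	pagepool = page;

	/* drain: refill the reserved global pool first, free the rest */
	erofs_release_pages(&pagepool);
}

/*
 * Every mounted instance is linked on `erofs_sb_list` so the global
 * shrinker can walk all of them; `umount_mutex` keeps a concurrent
 * shrinker run from racing with unmount.
 */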
void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	while (!xa_empty(&sbi->managed_pslots)) {
		z_erofs_shrink_scan(sbi, ~0UL);
		cond_resched();
	}
	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}

static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}

static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * Scanned superblocks get moved to the list tail, so stop
		 * once we see one that was already handled in this run.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;
		freed += z_erofs_shrink_scan(sbi, nr - freed);
		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}

int __init erofs_init_shrinker(void)
{
	erofs_shrinker_info = shrinker_alloc(0, "erofs-shrinker");
	if (!erofs_shrinker_info)
		return -ENOMEM;

	erofs_shrinker_info->count_objects = erofs_shrink_count;
	erofs_shrinker_info->scan_objects = erofs_shrink_scan;
	shrinker_register(erofs_shrinker_info);
	return 0;
}

void erofs_exit_shrinker(void)
{
	shrinker_free(erofs_shrinker_info);
}