xref: /linux/mm/execmem.c (revision 8804d970fab45726b3c7cd7f240b31122aa94219)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 * Copyright (C) 2024 Mike Rapoport IBM.
 */

#define pr_fmt(fmt) "execmem: " fmt

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/execmem.h>
#include <linux/maple_tree.h>
#include <linux/set_memory.h>
#include <linux/moduleloader.h>
#include <linux/text-patching.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct execmem_info *execmem_info __ro_after_init;
static struct execmem_info default_execmem_info __ro_after_init;

#ifdef CONFIG_MMU
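/*
 * Allocate @size bytes from [@range->start, @range->end) with vmalloc,
 * falling back to [@range->fallback_start, @range->fallback_end) when the
 * primary window is exhausted and a fallback is defined.  For ranges with
 * EXECMEM_KASAN_SHADOW, kmemleak scanning is deferred and the module
 * shadow is allocated once the mapping succeeds.
 */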
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
			     pgprot_t pgprot, unsigned long vm_flags)
{
	bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
	unsigned int align = range->alignment;
	unsigned long start = range->start;
	unsigned long end = range->end;
	void *p;

	if (kasan)
		vm_flags |= VM_DEFER_KMEMLEAK;

	p = __vmalloc_node_range(size, align, start, end, gfp_flags,
				 pgprot, vm_flags, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (!p && range->fallback_start) {
		start = range->fallback_start;
		end = range->fallback_end;
		p = __vmalloc_node_range(size, align, start, end, gfp_flags,
					 pgprot, vm_flags, NUMA_NO_NODE,
					 __builtin_return_address(0));
	}

	if (!p) {
		pr_warn_ratelimited("unable to allocate memory\n");
		return NULL;
	}

	if (kasan && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

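/*
 * Reserve a virtually contiguous area of @size bytes in the
 * EXECMEM_MODULE_DATA range without populating it, trying the fallback
 * window if the primary one cannot satisfy the request.
 */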
struct vm_struct *execmem_vmap(size_t size)
{
	struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
	struct vm_struct *area;

	area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
				  range->start, range->end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
	if (!area && range->fallback_start)
		area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
					  range->fallback_start, range->fallback_end,
					  NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0));

	return area;
}
#else
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
			     pgprot_t pgprot, unsigned long vm_flags)
{
	return vmalloc(size);
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
struct execmem_cache {
	struct mutex mutex;
	struct maple_tree busy_areas;
	struct maple_tree free_areas;
	unsigned int pending_free_cnt;	/* protected by mutex */
};

/* delay to schedule asynchronous free if fast path free fails */
#define FREE_DELAY	(msecs_to_jiffies(10))

/* mark entries in busy_areas that should be freed asynchronously */
#define PENDING_FREE_MASK	(1 << (PAGE_SHIFT - 1))

static struct execmem_cache execmem_cache = {
	.mutex = __MUTEX_INITIALIZER(execmem_cache.mutex),
	.busy_areas = MTREE_INIT_EXT(busy_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
	.free_areas = MTREE_INIT_EXT(free_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
};

static inline unsigned long mas_range_len(struct ma_state *mas)
{
	return mas->last - mas->index + 1;
}

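/*
 * Switch the direct map entries backing @vm to @valid, working in chunks
 * that match the area's page order.  On failure the entries that were
 * already updated are switched back and the error is returned.
 */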
static int execmem_set_direct_map_valid(struct vm_struct *vm, bool valid)
{
	unsigned int nr = (1 << get_vm_area_page_order(vm));
	unsigned int updated = 0;
	int err = 0;

	for (int i = 0; i < vm->nr_pages; i += nr) {
		err = set_direct_map_valid_noflush(vm->pages[i], nr, valid);
		if (err)
			goto err_restore;
		updated += nr;
	}

	return 0;

err_restore:
	for (int i = 0; i < updated; i += nr)
		set_direct_map_valid_noflush(vm->pages[i], nr, !valid);

	return err;
}

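/*
 * Make @size bytes at @ptr writable for text updates.  The executable
 * permission is dropped before the pages become writable so the mapping
 * is never writable and executable at the same time;
 * execmem_restore_rox() switches it back to read-only + execute.
 */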
static int execmem_force_rw(void *ptr, size_t size)
{
	unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)ptr;
	int ret;

	ret = set_memory_nx(addr, nr);
	if (ret)
		return ret;

	return set_memory_rw(addr, nr);
}

int execmem_restore_rox(void *ptr, size_t size)
{
	unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)ptr;

	return set_memory_rox(addr, nr);
}

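/*
 * Give fully free, PMD-aligned areas back to vmalloc: restore their
 * direct map entries, remove them from free_areas and vfree() the
 * underlying allocation.
 */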
static void execmem_cache_clean(struct work_struct *work)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	MA_STATE(mas, free_areas, 0, ULONG_MAX);
	void *area;

	mutex_lock(mutex);
	mas_for_each(&mas, area, ULONG_MAX) {
		size_t size = mas_range_len(&mas);

		if (IS_ALIGNED(size, PMD_SIZE) &&
		    IS_ALIGNED(mas.index, PMD_SIZE)) {
			struct vm_struct *vm = find_vm_area(area);

			execmem_set_direct_map_valid(vm, true);
			mas_store_gfp(&mas, NULL, GFP_KERNEL);
			vfree(area);
		}
	}
	mutex_unlock(mutex);
}

static DECLARE_WORK(execmem_cache_clean_work, execmem_cache_clean);

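/*
 * Insert [@ptr, @ptr + @size) into free_areas, merging it with an
 * adjacent free area on either side when they are contiguous.  Must be
 * called with the cache mutex held.
 */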
static int execmem_cache_add_locked(void *ptr, size_t size, gfp_t gfp_mask)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	unsigned long addr = (unsigned long)ptr;
	MA_STATE(mas, free_areas, addr - 1, addr + 1);
	unsigned long lower, upper;
	void *area = NULL;

	lower = addr;
	upper = addr + size - 1;

	area = mas_walk(&mas);
	if (area && mas.last == addr - 1)
		lower = mas.index;

	area = mas_next(&mas, ULONG_MAX);
	if (area && mas.index == addr + size)
		upper = mas.last;

	mas_set_range(&mas, lower, upper);
	return mas_store_gfp(&mas, (void *)lower, gfp_mask);
}

static int execmem_cache_add(void *ptr, size_t size, gfp_t gfp_mask)
{
	guard(mutex)(&execmem_cache.mutex);

	return execmem_cache_add_locked(ptr, size, gfp_mask);
}

static bool within_range(struct execmem_range *range, struct ma_state *mas,
			 size_t size)
{
	unsigned long addr = mas->index;

	if (addr >= range->start && addr + size < range->end)
		return true;

	if (range->fallback_start &&
	    addr >= range->fallback_start && addr + size < range->fallback_end)
		return true;

	return false;
}

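/*
 * Take the first free area that is large enough and lies within @range
 * (or its fallback window), move the allocated part to busy_areas and
 * return any remainder to free_areas.  Returns NULL if no suitable area
 * exists or updating the maple trees fails.
 */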
static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
	struct mutex *mutex = &execmem_cache.mutex;
	unsigned long addr, last, area_size = 0;
	void *area, *ptr = NULL;
	int err;

	mutex_lock(mutex);
	mas_for_each(&mas_free, area, ULONG_MAX) {
		area_size = mas_range_len(&mas_free);

		if (area_size >= size && within_range(range, &mas_free, size))
			break;
	}

	if (area_size < size)
		goto out_unlock;

	addr = mas_free.index;
	last = mas_free.last;

	/* insert the allocated area into busy_areas at range [addr, addr + size) */
	mas_set_range(&mas_busy, addr, addr + size - 1);
	err = mas_store_gfp(&mas_busy, (void *)addr, GFP_KERNEL);
	if (err)
		goto out_unlock;

	mas_store_gfp(&mas_free, NULL, GFP_KERNEL);
	if (area_size > size) {
		void *ptr = (void *)(addr + size);

		/*
		 * re-insert the remaining free space into free_areas at
		 * range [addr + size, last]
		 */
		mas_set_range(&mas_free, addr + size, last);
		err = mas_store_gfp(&mas_free, ptr, GFP_KERNEL);
		if (err) {
			mas_store_gfp(&mas_busy, NULL, GFP_KERNEL);
			goto out_unlock;
		}
	}
	ptr = (void *)addr;

out_unlock:
	mutex_unlock(mutex);
	return ptr;
}

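/*
 * Grow the cache: allocate a PMD-aligned chunk (or just @size bytes if
 * that fails), fill it with trapping instructions, make it
 * read-only + executable and add it to free_areas.
 */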
static int execmem_cache_populate(struct execmem_range *range, size_t size)
{
	unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
	struct vm_struct *vm;
	size_t alloc_size;
	int err = -ENOMEM;
	void *p;

	alloc_size = round_up(size, PMD_SIZE);
	p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
	if (!p) {
		alloc_size = size;
		p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
	}

	if (!p)
		return err;

	vm = find_vm_area(p);
	if (!vm)
		goto err_free_mem;

	/* fill memory with instructions that will trap */
	execmem_fill_trapping_insns(p, alloc_size);

	err = set_memory_rox((unsigned long)p, vm->nr_pages);
	if (err)
		goto err_free_mem;

	err = execmem_cache_add(p, alloc_size, GFP_KERNEL);
	if (err)
		goto err_reset_direct_map;

	return 0;

err_reset_direct_map:
	execmem_set_direct_map_valid(vm, true);
err_free_mem:
	vfree(p);
	return err;
}

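/*
 * Fast path: carve the allocation out of an existing free area.  If that
 * fails, populate the cache with a fresh mapping and retry once.
 */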
static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	void *p;
	int err;

	p = __execmem_cache_alloc(range, size);
	if (p)
		return p;

	err = execmem_cache_populate(range, size);
	if (err)
		return NULL;

	return __execmem_cache_alloc(range, size);
}

static inline bool is_pending_free(void *ptr)
{
	return ((unsigned long)ptr & PENDING_FREE_MASK);
}

static inline void *pending_free_set(void *ptr)
{
	return (void *)((unsigned long)ptr | PENDING_FREE_MASK);
}

static inline void *pending_free_clear(void *ptr)
{
	return (void *)((unsigned long)ptr & ~PENDING_FREE_MASK);
}

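/*
 * Move the busy area that @mas points at back to the free cache: make it
 * writable, overwrite it with trapping instructions, restore ROX
 * permissions and transfer it from busy_areas to free_areas.  Called
 * with the cache mutex held.
 */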
static int __execmem_cache_free(struct ma_state *mas, void *ptr, gfp_t gfp_mask)
{
	size_t size = mas_range_len(mas);
	int err;

	err = execmem_force_rw(ptr, size);
	if (err)
		return err;

	execmem_fill_trapping_insns(ptr, size);
	execmem_restore_rox(ptr, size);

	err = execmem_cache_add_locked(ptr, size, gfp_mask);
	if (err)
		return err;

	mas_store_gfp(mas, NULL, gfp_mask);
	return 0;
}

static void execmem_cache_free_slow(struct work_struct *work);
static DECLARE_DELAYED_WORK(execmem_cache_free_work, execmem_cache_free_slow);

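/*
 * Deferred free: retry the areas that were marked with PENDING_FREE_MASK
 * because the fast path could not free them, and reschedule the work
 * while any of them remain.
 */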
static void execmem_cache_free_slow(struct work_struct *work)
{
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	MA_STATE(mas, busy_areas, 0, ULONG_MAX);
	void *area;

	guard(mutex)(&execmem_cache.mutex);

	if (!execmem_cache.pending_free_cnt)
		return;

	mas_for_each(&mas, area, ULONG_MAX) {
		if (!is_pending_free(area))
			continue;

		area = pending_free_clear(area);
		if (__execmem_cache_free(&mas, area, GFP_KERNEL))
			continue;

		execmem_cache.pending_free_cnt--;
	}

	if (execmem_cache.pending_free_cnt)
		schedule_delayed_work(&execmem_cache_free_work, FREE_DELAY);
	else
		schedule_work(&execmem_cache_clean_work);
}

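/*
 * Free @ptr if it was allocated from the ROX cache; returns false when
 * @ptr is not a cached area so the caller can vfree() it instead.  If
 * the area cannot be returned to the cache right away, it is only marked
 * as pending and the actual free is left to the delayed work.
 */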
static bool execmem_cache_free(void *ptr)
{
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	unsigned long addr = (unsigned long)ptr;
	MA_STATE(mas, busy_areas, addr, addr);
	void *area;
	int err;

	guard(mutex)(&execmem_cache.mutex);

	area = mas_walk(&mas);
	if (!area)
		return false;

	err = __execmem_cache_free(&mas, area, GFP_KERNEL | __GFP_NORETRY);
	if (err) {
		/*
		 * mas points to the exact slot we got the area from and
		 * nothing else can modify the tree because of the mutex, so
		 * mas_store_gfp() won't allocate and will just update the
		 * pointer in place.
		 */
		area = pending_free_set(area);
		mas_store_gfp(&mas, area, GFP_KERNEL);
		execmem_cache.pending_free_cnt++;
		schedule_delayed_work(&execmem_cache_free_work, FREE_DELAY);
		return true;
	}

	schedule_work(&execmem_cache_clean_work);

	return true;
}

#else /* CONFIG_ARCH_HAS_EXECMEM_ROX */
/*
 * When the ROX cache is not used, the permissions defined by architectures
 * for execmem ranges that are updated before use (e.g. EXECMEM_MODULE_TEXT)
 * must be writable anyway.
 */
static inline int execmem_force_rw(void *ptr, size_t size)
{
	return 0;
}

static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	return NULL;
}

static bool execmem_cache_free(void *ptr)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_EXECMEM_ROX */

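/*
 * Allocate @size bytes for code or data of the given @type, either from
 * the ROX cache or with a plain vmalloc-based allocation, depending on
 * the flags of the range the architecture set up for @type.
 *
 * Purely illustrative example of a caller (not taken from this file):
 *
 *	void *text = execmem_alloc(EXECMEM_MODULE_TEXT, len);
 *
 *	if (!text)
 *		return -ENOMEM;
 *	...
 *	execmem_free(text);
 */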
void *execmem_alloc(enum execmem_type type, size_t size)
{
	struct execmem_range *range = &execmem_info->ranges[type];
	bool use_cache = range->flags & EXECMEM_ROX_CACHE;
	unsigned long vm_flags = VM_FLUSH_RESET_PERMS;
	pgprot_t pgprot = range->pgprot;
	void *p = NULL;

	size = PAGE_ALIGN(size);

	if (use_cache)
		p = execmem_cache_alloc(range, size);
	else
		p = execmem_vmalloc(range, size, pgprot, vm_flags);

	return kasan_reset_tag(p);
}

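/*
 * As execmem_alloc(), but guarantee the returned memory is writable: for
 * ROX-cached ranges the pages are switched back to RW before they are
 * handed out.  The allocation is released automatically when forcing RW
 * fails.
 */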
void *execmem_alloc_rw(enum execmem_type type, size_t size)
{
	void *p __free(execmem) = execmem_alloc(type, size);
	int err;

	if (!p)
		return NULL;

	err = execmem_force_rw(p, size);
	if (err)
		return NULL;

	return no_free_ptr(p);
}

void execmem_free(void *ptr)
{
	/*
	 * This memory may be RO, and freeing RO memory in an interrupt is not
	 * supported by vmalloc.
	 */
	WARN_ON(in_interrupt());

	if (!execmem_cache_free(ptr))
		vfree(ptr);
}

bool execmem_is_rox(enum execmem_type type)
{
	return !!(execmem_info->ranges[type].flags & EXECMEM_ROX_CACHE);
}

static bool execmem_validate(struct execmem_info *info)
{
	struct execmem_range *r = &info->ranges[EXECMEM_DEFAULT];

	if (!r->alignment || !r->start || !r->end || !pgprot_val(r->pgprot)) {
		pr_crit("Invalid parameters for execmem allocator, module loading will fail");
		return false;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX)) {
		for (int i = EXECMEM_DEFAULT; i < EXECMEM_TYPE_MAX; i++) {
			r = &info->ranges[i];

			if (r->flags & EXECMEM_ROX_CACHE) {
				pr_warn_once("ROX cache is not supported\n");
				r->flags &= ~EXECMEM_ROX_CACHE;
			}
		}
	}

	return true;
}

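/*
 * Fill in the ranges the architecture left unset with the EXECMEM_DEFAULT
 * parameters; EXECMEM_MODULE_DATA additionally gets non-executable
 * PAGE_KERNEL protections.
 */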
static void execmem_init_missing(struct execmem_info *info)
{
	struct execmem_range *default_range = &info->ranges[EXECMEM_DEFAULT];

	for (int i = EXECMEM_DEFAULT + 1; i < EXECMEM_TYPE_MAX; i++) {
		struct execmem_range *r = &info->ranges[i];

		if (!r->start) {
			if (i == EXECMEM_MODULE_DATA)
				r->pgprot = PAGE_KERNEL;
			else
				r->pgprot = default_range->pgprot;
			r->alignment = default_range->alignment;
			r->start = default_range->start;
			r->end = default_range->end;
			r->flags = default_range->flags;
			r->fallback_start = default_range->fallback_start;
			r->fallback_end = default_range->fallback_end;
		}
	}
}

struct execmem_info * __weak execmem_arch_setup(void)
{
	return NULL;
}

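/*
 * Set up execmem_info from the architecture callback, falling back to a
 * single default range covering the whole vmalloc space with
 * PAGE_KERNEL_EXEC protections when no callback is provided.
 */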
static void __init __execmem_init(void)
{
	struct execmem_info *info = execmem_arch_setup();

	if (!info) {
		info = execmem_info = &default_execmem_info;
		info->ranges[EXECMEM_DEFAULT].start = VMALLOC_START;
		info->ranges[EXECMEM_DEFAULT].end = VMALLOC_END;
		info->ranges[EXECMEM_DEFAULT].pgprot = PAGE_KERNEL_EXEC;
		info->ranges[EXECMEM_DEFAULT].alignment = 1;
	}

	if (!execmem_validate(info))
		return;

	execmem_init_missing(info);

	execmem_info = info;
}

#ifdef CONFIG_ARCH_WANTS_EXECMEM_LATE
static int __init execmem_late_init(void)
{
	__execmem_init();
	return 0;
}
core_initcall(execmem_late_init);
#else
void __init execmem_init(void)
{
	__execmem_init();
}
#endif