xref: /linux/mm/usercopy.c (revision 5ce1be0e40fe64cbf540ba54dd10824a8cef99e1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks if a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}

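/*
 * Illustrative example (not part of the original source): with a local
 * array "buf" in the caller's stack frame, copy_to_user(uptr, buf,
 * sizeof(buf)) should yield GOOD_FRAME (or GOOD_STACK on architectures
 * without frame walking), while a length that runs past the end of the
 * task stack yields BAD_STACK, which __check_object_size() treats as
 * fatal.
 */
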
/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
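/*
 * Illustrative sketch (the cache name, offset, and size here are
 * hypothetical, not taken from this file): a cache whose objects may only
 * expose a 64-byte window starting at offset 16 to userspace could be
 * created with:
 *
 *	kmem_cache_create_usercopy("example_cache", object_size, 0,
 *				   SLAB_HWCACHE_ALIGN, 16, 64, NULL);
 *
 * A copy touching bytes outside that whitelisted window then ends up in
 * usercopy_warn() or usercopy_abort() below.
 */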
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the reverse of __va(). This
	 * can be detected and checked:
	 */
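	/*
	 * For example (illustrative, not from the original comment): on
	 * arm64 the kernel image mapping and the linear map provide two
	 * different virtual addresses for _stext, and lm_alias(_stext)
	 * returns the linear-map alias so both ranges can be checked.
	 */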
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	/*
	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
	 * highmem page or fall back to virt_to_page(). The following
	 * is effectively a highmem-aware virt_to_head_page().
	 */
	page = compound_head(kmap_to_page((void *)ptr));

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
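/*
 * Simplified call path (a hedged summary for orientation, not part of
 * this file): copy_to_user() and copy_from_user() reach this through
 * check_object_size(), which only calls __check_object_size() when the
 * copy size is not a compile-time constant.
 */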
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking is not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	return strtobool(str, &enable_checks);
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
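/*
 * Usage note (summary of the boot parameter handled above): booting with
 * "hardened_usercopy=off" sets enable_checks to false, and the
 * late_initcall() below then enables the bypass_usercopy_checks static
 * branch so the runtime checks are skipped.
 */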

static int __init set_hardened_usercopy(void)
{
	if (enable_checks == false)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);