xref: /linux/mm/usercopy.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}
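
/*
 * Illustrative sketch of the classification (not compiled here;
 * "kmalloc_ptr" is a hypothetical heap pointer): for a local buffer
 * in the caller's frame,
 *
 *	char buf[64];
 *
 *	check_stack_object(buf, sizeof(buf));	GOOD_FRAME or GOOD_STACK
 *	check_stack_object(buf, THREAD_SIZE);	BAD_STACK (runs past the
 *						end of the stack)
 *	check_stack_object(kmalloc_ptr, 64);	NOT_STACK
 *
 * Whether GOOD_FRAME or GOOD_STACK is returned depends on whether the
 * architecture implements arch_within_stack_frames().
 */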

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * Several checks are performed on the buffer by __check_object_size().
 * Normal stack buffer usage should never trip the checks, while kernel
 * text addressing will always trip them. For cache objects, the check is
 * that only the whitelisted range of bytes for a given cache is being
 * accessed (via the cache's usersize and useroffset fields). To adjust a
 * cache whitelist, use the usercopy-aware kmem_cache_create_usercopy()
 * function to create the cache (and carefully audit the whitelist range).
 */
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}
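
/*
 * Illustrative sketch of adjusting a cache whitelist as described
 * above ("struct foo" and "foo_cache" are hypothetical): whitelist
 * only the "data" member, so a usercopy that touches any other part
 * of a foo_cache object trips usercopy_abort():
 *
 *	cachep = kmem_cache_create_usercopy("foo_cache",
 *				sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, data),
 *				sizeof(((struct foo *)0)->data), NULL);
 */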

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
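
/*
 * Both intervals are half-open, so a range that ends exactly at @low
 * or begins exactly at @high does not overlap. With hypothetical
 * addresses: overlaps(0x1000, 0x100, 0x1100, 0x2000) is false, while
 * overlaps(0x1000, 0x101, 0x1100, 0x2000) is true.
 */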

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear mapping of physical
	 * memory, where __pa() is not simply the inverse of __va(). This
	 * can be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}
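
/*
 * On arm64, for instance, the kernel image runs from a mapping distinct
 * from the linear map of all physical memory, so lm_alias(_stext)
 * differs from _stext and the secondary range above gets checked; on
 * architectures where the two coincide, the early return is taken.
 */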

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + n < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
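
/*
 * As a worked example of the wrap check: with 64-bit unsigned longs,
 * ptr == ULONG_MAX - 3 and n == 8 gives ptr + n == 4 after wrapping,
 * which is less than ptr, so the copy is rejected before it could
 * span both ends of the address space.
 */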

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Accept if the range is entirely either Reserved (i.e. special
	 * or device memory) or CMA. Otherwise, reject since the object
	 * spans several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}
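
/*
 * Note that a kmalloc() allocation large enough to be handed to the
 * page allocator is normally a compound (__GFP_COMP) allocation, so
 * the virt_to_head_page() comparison above accepts such an object
 * even though it spans multiple base pages.
 */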

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	page = virt_to_head_page(ptr);

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not a bogus address
 * - a known-safe heap or stack object
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
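
/*
 * With CONFIG_HARDENED_USERCOPY enabled, the uaccess paths reach this
 * via check_object_size() in include/linux/thread_info.h, which calls
 * __check_object_size() for non-constant sizes. So, for example, a
 * driver doing
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *
 * has kbuf/len validated here before any bytes are copied.
 */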

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	return strtobool(str, &enable_checks);
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
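
/*
 * For example, booting with "hardened_usercopy=off" on the kernel
 * command line sets enable_checks to false, and the late initcall
 * below then enables the static branch so __check_object_size()
 * returns immediately.
 */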

static int __init set_hardened_usercopy(void)
{
	if (!enable_checks)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);