xref: /linux/mm/usercopy.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
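/* Prefix all pr_*() output from this file with "usercopy: " (KBUILD_MODNAME). */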
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <asm/sections.h>

/*
 * Checks whether a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
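 *
 * (These return values are shared with arch_within_stack_frames().)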
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}

static void report_usercopy(const void *ptr, unsigned long len,
			    bool to_user, const char *type)
{
	pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
		to_user ? "exposure" : "overwrite",
		to_user ? "from" : "to", ptr, type ? : "unknown", len);
	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
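/*
 * For example, with [low,high) == [0x1000,0x2000): ptr == (void *)0x1ffc
 * and n == 8 overlaps (its first four bytes fall inside the range), while
 * ptr == (void *)0x2000 with any length does not.
 */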
static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
		     unsigned long high)
{
	unsigned long check_low = (unsigned long)ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

/* Is this address range in the kernel text area? */
static inline const char *check_kernel_text_object(const void *ptr,
						   unsigned long n)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		return "<kernel text>";

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear physical memory mapping,
	 * in which case __pa() is not just the reverse of __va(). This can
	 * be detected and checked:
	 */
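	/*
	 * (For example, on x86_64 and arm64 the kernel image is mapped at a
	 * different virtual address than the linear/direct map of the same
	 * physical pages, so lm_alias(_stext) != _stext there.)
	 */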
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return NULL;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		return "<linear kernel text>";

	return NULL;
}

static inline const char *check_bogus_address(const void *ptr, unsigned long n)
{
	/* Reject if object wraps past end of memory. */
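	/* (e.g. a 16-byte object starting 8 bytes below the top of the address space) */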
	if ((unsigned long)ptr + n < (unsigned long)ptr)
		return "<wrapped address>";

	/* Reject if NULL or ZERO_SIZE_PTR (the result of a zero-size allocation). */
	if (ZERO_OR_NULL_PTR(ptr))
		return "<null>";

	return NULL;
}

/* Checks for allocs that are marked in some way as spanning multiple pages. */
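/*
 * (This lives behind CONFIG_HARDENED_USERCOPY_PAGESPAN because some kernel
 * code still performs multi-page allocations without __GFP_COMP, which this
 * check would otherwise reject.)
 */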
static inline const char *check_page_span(const void *ptr, unsigned long n,
					  struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			return "<rodata>";
		return NULL;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return NULL;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return NULL;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return NULL;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return NULL;

	/*
	 * Accept if the range is entirely either Reserved (i.e. special or
	 * device memory) or CMA (the loop below verifies this page by page).
	 * Otherwise, reject since the object spans several independently
	 * allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		return "<spans multiple pages>";

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			return "<spans Reserved and non-Reserved pages>";
		if (is_cma && !is_migrate_cma_page(page))
			return "<spans CMA and non-CMA pages>";
	}
#endif

	return NULL;
}

static inline const char *check_heap_object(const void *ptr, unsigned long n,
					    bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return NULL;

	page = virt_to_head_page(ptr);

	/* Check slab allocator for flags and size. */
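	/*
	 * (__check_heap_object() is provided by the slab allocator in use
	 * and rejects copies that are not contained within a single object.)
	 */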
	if (PageSlab(page))
		return __check_heap_object(ptr, n, page);

	/* Verify object does not incorrectly span multiple pages. */
	return check_page_span(ptr, n, page, to_user);
}

/*
 * Validates that the given object is:
 * - not a bogus address
 * - a known-safe heap or stack object
 * - not in kernel text
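 *
 * Reached via check_object_size() in <linux/thread_info.h>, which the
 * uaccess routines (copy_to_user()/copy_from_user() and friends) call when
 * CONFIG_HARDENED_USERCOPY is enabled and the copy size is not a
 * compile-time constant.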
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	const char *err;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	err = check_bogus_address(ptr, n);
	if (err)
		goto report;

	/* Check for bad heap object. */
	err = check_heap_object(ptr, n, to_user);
	if (err)
		goto report;

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking is not available).
		 */
		return;
	default:
		err = "<process stack>";
		goto report;
	}

	/* Check for object in kernel text, to avoid exposing or overwriting it. */
	err = check_kernel_text_object(ptr, n);
	if (!err)
		return;

report:
	report_usercopy(ptr, n, to_user, err);
}
EXPORT_SYMBOL(__check_object_size);