/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <asm/sections.h>

enum {
	BAD_STACK = -1,
	NOT_STACK = 0,
	GOOD_FRAME,
	GOOD_STACK,
};

/*
 * Checks if a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}
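
/*
 * Note on arch_within_stack_frames(): architectures that cannot walk
 * their stack frames fall back to a generic stub that simply returns 0
 * (i.e. NOT_STACK, "no opinion"), which is why a zero return above
 * falls through to GOOD_STACK. A minimal sketch of that fallback,
 * roughly as found in include/linux/thread_info.h (exact form may
 * differ by kernel version):
 *
 *	#ifndef arch_within_stack_frames
 *	static inline int arch_within_stack_frames(const void * const stack,
 *						   const void * const stackend,
 *						   const void *obj,
 *						   unsigned long len)
 *	{
 *		return 0;
 *	}
 *	#endif
 */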

static void report_usercopy(const void *ptr, unsigned long len,
			    bool to_user, const char *type)
{
	pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to", ptr, type ? : "unknown", len);
	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
		     unsigned long high)
{
	unsigned long check_low = (uintptr_t)ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

/* Is this address range in the kernel text area? */
static inline const char *check_kernel_text_object(const void *ptr,
						   unsigned long n)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		return "<kernel text>";

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear mapping of physical
	 * memory, in which __pa() is not simply the reverse of __va().
	 * This can be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return NULL;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		return "<linear kernel text>";

	return NULL;
}
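
/*
 * For reference: lm_alias() translates a kernel-image symbol address to
 * its linear-map alias. The generic definition is roughly the following,
 * from include/linux/mm.h (exact form may differ by kernel version):
 *
 *	#ifndef lm_alias
 *	#define lm_alias(x)	__va(__pa_symbol(x))
 *	#endif
 *
 * On architectures where the kernel image lives inside the linear map
 * (e.g. x86), this yields the same address, so textlow_linear equals
 * textlow above and the secondary check is skipped.
 */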

static inline const char *check_bogus_address(const void *ptr, unsigned long n)
{
	/* Reject if object wraps past end of memory. */
	if ((unsigned long)ptr + n < (unsigned long)ptr)
		return "<wrapped address>";

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		return "<null>";

	return NULL;
}

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline const char *check_page_span(const void *ptr, unsigned long n,
					  struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			return "<rodata>";
		return NULL;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return NULL;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return NULL;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return NULL;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return NULL;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		return "<spans multiple pages>";

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			return "<spans Reserved and non-Reserved pages>";
		if (is_cma && !is_migrate_cma_page(page))
			return "<spans CMA and non-CMA pages>";
	}
#endif

	return NULL;
}

static inline const char *check_heap_object(const void *ptr, unsigned long n,
					    bool to_user)
{
	struct page *page;

	/*
	 * Some architectures (arm64) return true for virt_addr_valid() on
	 * vmalloced addresses. Work around this by checking for vmalloc
	 * first.
	 *
	 * We also need to check for module addresses explicitly since we
	 * may copy static data from modules to userspace.
	 */
	if (is_vmalloc_or_module_addr(ptr))
		return NULL;

	if (!virt_addr_valid(ptr))
		return NULL;

	page = virt_to_head_page(ptr);

	/* Check slab allocator for flags and size. */
	if (PageSlab(page))
		return __check_heap_object(ptr, n, page);

	/* Verify object does not incorrectly span multiple pages. */
	return check_page_span(ptr, n, page, to_user);
}

/*
 * Validates that the given object is:
 * - not a bogus address
 * - a known-safe heap or stack object
 * - not in the kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	const char *err;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	err = check_bogus_address(ptr, n);
	if (err)
		goto report;

	/* Check for bad heap object. */
	err = check_heap_object(ptr, n, to_user);
	if (err)
		goto report;

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking is not available).
		 */
		return;
	default:
		err = "<process stack>";
		goto report;
	}

	/* Check for object overlapping kernel text, to avoid exposing it. */
	err = check_kernel_text_object(ptr, n);
	if (!err)
		return;

report:
	report_usercopy(ptr, n, to_user, err);
}
EXPORT_SYMBOL(__check_object_size);
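
/*
 * For reference, callers do not normally invoke __check_object_size()
 * directly. The copy_{to,from}_user() paths go through a small inline
 * wrapper, check_object_size(), which skips the runtime checks when the
 * copy size is a compile-time constant (those cases are expected to be
 * caught by static checks instead). A minimal sketch of that wrapper,
 * roughly as found in include/linux/thread_info.h (exact form may
 * differ by kernel version):
 *
 *	static __always_inline void check_object_size(const void *ptr,
 *						      unsigned long n,
 *						      bool to_user)
 *	{
 *		if (!__builtin_constant_p(n))
 *			__check_object_size(ptr, n, to_user);
 *	}
 */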