// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"

/*
 * Checks if a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: within the current stack (when it can't be frame-checked exactly)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
	if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
		if ((void *)current_stack_pointer < obj + len)
			return BAD_STACK;
	} else {
		if (obj < (void *)current_stack_pointer)
			return BAD_STACK;
	}
#endif

	return GOOD_STACK;
}
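
/*
 * Illustrative example (not from the original source; "uptr" is an
 * assumed userspace pointer) of how the classifications above apply
 * to a simple copy_to_user() call:
 *
 *	char buf[16];
 *
 *	copy_to_user(uptr, buf, sizeof(buf));
 *		- buf lies entirely within the current frame:
 *		  GOOD_FRAME, or GOOD_STACK where the architecture
 *		  cannot walk stack frames.
 *	copy_to_user(uptr, buf, 4096);
 *		- the range runs past the frame; depending on where
 *		  buf sits, either the partial-overlap check, the
 *		  frame walk, or the depth check returns BAD_STACK,
 *		  and __check_object_size() calls usercopy_abort().
 */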

/*
 * If this function is reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}
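
/*
 * A minimal sketch of adjusting a cache whitelist as described above
 * (the struct and cache names are hypothetical, not part of this file):
 * only the "buf" member may be copied to/from userspace, so a copy
 * touching "refcount" would trigger usercopy_abort().
 *
 *	struct greeting {
 *		long refcount;
 *		char buf[64];
 *	};
 *
 *	cache = kmem_cache_create_usercopy("greeting",
 *					   sizeof(struct greeting), 0,
 *					   SLAB_HWCACHE_ALIGN,
 *					   offsetof(struct greeting, buf),
 *					   sizeof_field(struct greeting, buf),
 *					   NULL);
 */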

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear physical memory mapping,
	 * in which __pa() is not just the reverse of __va(). This can be
	 * detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
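
/*
 * Worked example of the wrap check above (illustrative, assuming a
 * 64-bit unsigned long): for ptr == 0xffffffffffffff00 and n == 0x200,
 * ptr + (n - 1) wraps around to 0xff, which is less than ptr, so the
 * copy is rejected before any later range comparison could be fooled
 * by the wrapped end address.
 */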

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}
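
/*
 * Illustrative sketch (assumes CONFIG_HARDENED_USERCOPY_PAGESPAN and a
 * non-slab allocation; "uptr" is an assumed userspace pointer): a copy
 * crossing a page boundary is accepted only when both ends resolve to
 * the same compound head.
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);	- one 4-page unit
 *	ptr = page_address(page);
 *	copy_to_user(uptr, ptr + PAGE_SIZE - 64, 128);	- spans a page
 *		boundary inside the compound page: allowed
 *
 * The same copy across two independently allocated order-0 pages would
 * hit the "spans multiple pages" abort.
 */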

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct folio *folio;

	if (!virt_addr_valid(ptr))
		return;

	if (is_kmap_addr(ptr)) {
		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);

		if ((unsigned long)ptr + n - 1 > page_end)
			usercopy_abort("kmap", NULL, to_user,
				       offset_in_page(ptr), n);
		return;
	}

	if (is_vmalloc_addr(ptr)) {
		struct vm_struct *area = find_vm_area(ptr);
		unsigned long offset;

		if (!area) {
			usercopy_abort("vmalloc", "no area", to_user, 0, n);
			return;
		}

		offset = ptr - area->addr;
		if (offset + n > get_vm_area_size(area))
			usercopy_abort("vmalloc", NULL, to_user, offset, n);
		return;
	}

	folio = virt_to_folio(ptr);

	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, folio_page(folio, 0), to_user);
	}
}
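
/*
 * Example of the vmalloc bounds check above (illustrative, assuming
 * 4K pages): for p = vmalloc(8192), get_vm_area_size() reports 8192
 * (the trailing guard page is excluded), so copying 4096 bytes from
 * p + 4096 is allowed, while copying 4097 bytes from the same offset
 * would run past the area and trigger usercopy_abort().
 */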

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
			IS_ENABLED(CONFIG_STACK_GROWSUP) ?
				ptr - (void *)current_stack_pointer :
				(void *)current_stack_pointer - ptr,
#else
			0,
#endif
			n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (strtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);

static int __init set_hardened_usercopy(void)
{
	if (enable_checks == false)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);
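
/*
 * Usage note (boot-time, via the documented kernel parameter): all of
 * the checks in this file can be disabled with
 *
 *	hardened_usercopy=off
 *
 * which makes parse_hardened_usercopy() clear enable_checks, so that
 * set_hardened_usercopy() enables the bypass_usercopy_checks static
 * branch at late_initcall time.
 */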