// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"

/*
 * Checks if a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: within the current stack (when frame checking is
 *		    not possible)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
	if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
		if ((void *)current_stack_pointer < obj + len)
			return BAD_STACK;
	} else {
		if (obj < (void *)current_stack_pointer)
			return BAD_STACK;
	}
#endif

	return GOOD_STACK;
}
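
/*
 * Illustrative sketch (not part of this file) of what the depth check
 * above catches, assuming a downward-growing stack; callee() and p are
 * hypothetical names:
 *
 *	static char *p;
 *	static void callee(void) { char buf[64]; p = buf; }
 *	...
 *	callee();			// buf's frame has been popped
 *	copy_to_user(to, p, 64);	// now p < current_stack_pointer
 *
 * Such a pointer still lies inside the task's stack pages, so it would
 * otherwise pass as GOOD_STACK; with CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
 * it is rejected as BAD_STACK instead.
 */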

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
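
/*
 * Example (illustrative only): both intervals are half-open, so ranges
 * that merely touch do not overlap:
 *
 *	overlaps(0x1000, 0x100, 0x1100, 0x2000) == false  (adjacent)
 *	overlaps(0x1000, 0x100, 0x10ff, 0x2000) == true   (one byte shared)
 */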

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the reverse of __va(). This
	 * can be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
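
/*
 * Example (illustrative only): the wrap check above uses ptr + (n - 1)
 * so that a copy ending exactly at the top of the address space is
 * still accepted, while one byte more wraps around and aborts:
 *
 *	ptr = ULONG_MAX - 15, n = 16: ptr + 15 == ULONG_MAX, no wrap
 *	ptr = ULONG_MAX - 15, n = 17: ptr + 16 wraps to 0, aborts
 */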

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct folio *folio;

	if (!virt_addr_valid(ptr))
		return;

	/*
	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
	 * highmem page or fall back to virt_to_page(). The following
	 * is effectively a highmem-aware virt_to_slab().
	 */
	folio = page_folio(kmap_to_page((void *)ptr));

	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, folio_page(folio, 0), to_user);
	}
}
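
/*
 * Usage sketch (illustrative, not part of this file): a cache whose
 * objects are only partially copied to user space should whitelist just
 * that range, so the slab check above accepts it. The struct foo and
 * foo_cachep names are hypothetical:
 *
 *	struct foo {
 *		spinlock_t lock;	// never copied to user space
 *		char name[32];		// copied via copy_to_user()
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo",
 *			sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct foo, name),	// useroffset
 *			sizeof_field(struct foo, name),	// usersize
 *			NULL);
 *
 * A copy overlapping foo::lock would then trip usercopy_abort().
 */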

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not a bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking is not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
			IS_ENABLED(CONFIG_STACK_GROWSUP) ?
				ptr - (void *)current_stack_pointer :
				(void *)current_stack_pointer - ptr,
#else
			0,
#endif
			n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel text to avoid exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
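
/*
 * Call-path sketch (illustrative): this check is reached from the
 * uaccess helpers, roughly:
 *
 *	copy_to_user(to, from, n)
 *	  -> check_object_size(from, n, true)	// include/linux/thread_info.h
 *	       -> __check_object_size(from, n, true)
 *
 * check_object_size() only calls in here for sizes that are not
 * compile-time constants, and compiles away entirely when
 * CONFIG_HARDENED_USERCOPY is not set.
 */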

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	return strtobool(str, &enable_checks);
}

__setup("hardened_usercopy=", parse_hardened_usercopy);

static int __init set_hardened_usercopy(void)
{
	if (enable_checks == false)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);
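
/*
 * Example (illustrative): booting with "hardened_usercopy=off" (or any
 * value strtobool() parses as false, e.g. "0" or "n") enables the
 * bypass_usercopy_checks static branch above, so __check_object_size()
 * returns immediately and the per-copy overhead is avoided.
 */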