// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core software tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

static DEFINE_PER_CPU(u32, prng_state);

void __init kasan_init_sw_tags(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(prng_state, cpu) = (u32)get_cycles();

	kasan_init_tags();

	pr_info("KernelAddressSanitizer initialized (sw-tags, stacktrace=%s)\n",
		str_on_off(kasan_stack_collection_enabled()));
}

/*
 * If a preemption happens between this_cpu_read and this_cpu_write, the only
 * side effect is that a few objects allocated in different contexts get the
 * same tag. Since tag-based KASAN is meant to be used as a probabilistic
 * bug-detection debug feature, this doesn't have a significant negative
 * impact.
 *
 * Ideally the tags would use strong randomness to prevent any attempt to
 * predict them during explicit exploit attempts. But strong randomness is
 * expensive, so we made an intentional trade-off and use a PRNG. This
 * non-atomic RMW sequence in fact has a positive effect, since interrupts
 * that randomly skew the PRNG at unpredictable points only do good.
 */
u8 kasan_random_tag(void)
{
	u32 state = this_cpu_read(prng_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(prng_state, state);

	return (u8)(state % (KASAN_TAG_MAX + 1));
}

bool kasan_check_range(const void *addr, size_t size, bool write,
			unsigned long ret_ip)
{
	u8 tag;
	u8 *shadow_first, *shadow_last, *shadow;
	void *untagged_addr;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	tag = get_tag((const void *)addr);

	/*
	 * Ignore accesses for pointers tagged with 0xff (native kernel
	 * pointer tag) to suppress false positives caused by kmap.
	 *
	 * Some kernel code was written to account for archs that don't keep
	 * high memory mapped all the time, but rather map and unmap particular
	 * pages when needed. Instead of storing a pointer to the kernel memory,
	 * this code saves the address of the page structure and offset within
	 * that page for later use. Those pages are then mapped and unmapped
	 * with kmap/kunmap when necessary and virt_to_page is used to get the
	 * virtual address of the page. For arm64 (which keeps the high memory
	 * mapped all the time), kmap is turned into a page_address call.
	 *
	 * The issue is that with use of the page_address + virt_to_page
	 * sequence the top byte value of the original pointer gets lost (gets
	 * set to KASAN_TAG_KERNEL (0xFF)).
	 */
	if (tag == KASAN_TAG_KERNEL)
		return true;

	untagged_addr = kasan_reset_tag((const void *)addr);
	if (unlikely(!addr_has_metadata(untagged_addr)))
		return !kasan_report(addr, size, write, ret_ip);
	shadow_first = kasan_mem_to_shadow(untagged_addr);
	shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
	for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
		if (*shadow != tag) {
			return !kasan_report(addr, size, write, ret_ip);
		}
	}

	return true;
}

bool kasan_byte_accessible(const void *addr)
{
	u8 tag = get_tag(addr);
	void *untagged_addr = kasan_reset_tag(addr);
	u8 shadow_byte;

	if (!addr_has_metadata(untagged_addr))
		return false;

	shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(untagged_addr));
	return tag == KASAN_TAG_KERNEL || tag == shadow_byte;
}

/*
 * Entry points called by compiler-generated instrumentation for checked
 * loads and stores of fixed sizes.
 */
#define DEFINE_HWASAN_LOAD_STORE(size)					\
	void __hwasan_load##size##_noabort(void *addr)			\
	{								\
		kasan_check_range(addr, size, false, _RET_IP_);		\
	}								\
	EXPORT_SYMBOL(__hwasan_load##size##_noabort);			\
	void __hwasan_store##size##_noabort(void *addr)			\
	{								\
		kasan_check_range(addr, size, true, _RET_IP_);		\
	}								\
	EXPORT_SYMBOL(__hwasan_store##size##_noabort)

DEFINE_HWASAN_LOAD_STORE(1);
DEFINE_HWASAN_LOAD_STORE(2);
DEFINE_HWASAN_LOAD_STORE(4);
DEFINE_HWASAN_LOAD_STORE(8);
DEFINE_HWASAN_LOAD_STORE(16);

void __hwasan_loadN_noabort(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_loadN_noabort);

void __hwasan_storeN_noabort(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_storeN_noabort);

void __hwasan_tag_memory(void *addr, u8 tag, ssize_t size)
{
	kasan_poison(addr, size, tag, false);
}
EXPORT_SYMBOL(__hwasan_tag_memory);

/*
 * access_info encoding: bits 0-3 hold log2 of the access size, bit 4 is set
 * for writes.
 */
void kasan_tag_mismatch(void *addr, unsigned long access_info,
			unsigned long ret_ip)
{
	kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10,
		     ret_ip);
}