// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM.
 *
 * Copyright (c) 2018 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 * Author: Linus Walleij <linus.walleij@linaro.org>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/pgtable.h>
#include <asm/cputype.h>
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/proc-fns.h>

#include "mm.h"

static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;

static __init void *kasan_alloc_block(size_t size)
{
	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
}

static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, bool early)
{
	unsigned long next;
	pte_t *ptep = pte_offset_kernel(pmdp, addr);

	do {
		pte_t entry;
		void *p;

		next = addr + PAGE_SIZE;

		if (!early) {
			if (!pte_none(READ_ONCE(*ptep)))
				continue;

			p = kasan_alloc_block(PAGE_SIZE);
			if (!p) {
				panic("%s failed to allocate shadow page for address 0x%lx\n",
				      __func__, addr);
				return;
			}
			memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
			entry = pfn_pte(virt_to_pfn(p),
					__pgprot(pgprot_val(PAGE_KERNEL)));
		} else if (pte_none(READ_ONCE(*ptep))) {
			/*
			 * The early shadow memory is mapping all KASan
			 * operations to one and the same page in memory,
			 * "kasan_early_shadow_page" so that the instrumentation
			 * will work on a scratch area until we can set up the
			 * proper KASan shadow memory.
			 */
			entry = pfn_pte(virt_to_pfn(kasan_early_shadow_page),
					__pgprot(_L_PTE_DEFAULT | L_PTE_DIRTY | L_PTE_XN));
		} else {
			/*
			 * Early shadow mappings are PMD_SIZE aligned, so if the
			 * first entry is already set, they must all be set.
			 */
			return;
		}

		set_pte_at(&init_mm, addr, ptep, entry);
	} while (ptep++, addr = next, addr != end);
}
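/*
 * The populate walkers in this file mirror the page table hierarchy over
 * the shadow region: kasan_pgd_populate() steps through the (folded)
 * p4d/pud levels into kasan_pmd_populate() and kasan_pte_populate(). When
 * "early" is set they reuse the shared kasan_early_shadow_pte and
 * kasan_early_shadow_page instead of allocating fresh memory, so a minimal
 * scratch shadow exists until kasan_init() builds the real one.
 * kasan_mem_to_shadow() translates an address to its shadow as
 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET, i.e. one
 * shadow byte covers eight bytes of kernel address space.
 */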
/*
 * The pmd (page middle directory) is only used on LPAE
 */
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, bool early)
{
	unsigned long next;
	pmd_t *pmdp = pmd_offset(pudp, addr);

	do {
		if (pmd_none(*pmdp)) {
			/*
			 * We attempt to allocate a shadow block for the PMDs
			 * used by the PTEs for this address if it isn't already
			 * allocated.
			 */
			void *p = early ? kasan_early_shadow_pte :
					  kasan_alloc_block(PAGE_SIZE);

			if (!p) {
				panic("%s failed to allocate shadow block for address 0x%lx\n",
				      __func__, addr);
				return;
			}
			pmd_populate_kernel(&init_mm, pmdp, p);
			flush_pmd_entry(pmdp);
		}

		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, early);
	} while (pmdp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      bool early)
{
	unsigned long next;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;

	pgdp = pgd_offset_k(addr);

	do {
		/*
		 * Allocate and populate the shadow block of p4d folded into
		 * pud folded into pmd if it doesn't already exist
		 */
		if (!early && pgd_none(*pgdp)) {
			void *p = kasan_alloc_block(PAGE_SIZE);

			if (!p) {
				panic("%s failed to allocate shadow block for address 0x%lx\n",
				      __func__, addr);
				return;
			}
			pgd_populate(&init_mm, pgdp, p);
		}

		next = pgd_addr_end(addr, end);
		/*
		 * We just immediately jump over the p4d and pud page
		 * directories since we believe ARM32 will never gain four
		 * nor five level page tables.
		 */
		p4dp = p4d_offset(pgdp, addr);
		pudp = pud_offset(p4dp, addr);

		kasan_pmd_populate(pudp, addr, next, early);
	} while (pgdp++, addr = next, addr != end);
}

extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init kasan_early_init(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (list) {
#ifdef MULTI_CPU
		processor = *list->proc;
#endif
	}

	BUILD_BUG_ON((KASAN_SHADOW_END - (1UL << 29)) != KASAN_SHADOW_OFFSET);
	/*
	 * We walk the page table and set all of the shadow memory to point
	 * to the scratch page.
	 */
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, true);
}

static void __init clear_pgds(unsigned long start,
			      unsigned long end)
{
	for (; start && start < end; start += PMD_SIZE)
		pmd_clear(pmd_off_k(start));
}

static int __init create_mapping(void *start, void *end)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = kasan_mem_to_shadow(end);

	pr_info("Mapping kernel virtual memory block: %px-%px at shadow: %px-%px\n",
		start, end, shadow_start, shadow_end);

	kasan_pgd_populate((unsigned long)shadow_start & PAGE_MASK,
			   PAGE_ALIGN((unsigned long)shadow_end), false);
	return 0;
}
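/*
 * kasan_init() replaces the early scratch shadow with real shadow memory.
 * It runs on a temporary copy of the initial page tables so that the early
 * shadow stays mapped while the real one is built: clear the early shadow
 * mappings, populate zero shadow for areas that never get real shadow,
 * create real shadow for every lowmem block and for the pkmap (and,
 * without KASAN_VMALLOC, module) areas, make the early shadow PTEs map the
 * zero page read-only, then switch back to swapper_pg_dir and enable
 * reporting by resetting init_task.kasan_depth.
 */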
void __init kasan_init(void)
{
	phys_addr_t pa_start, pa_end;
	u64 i;

	/*
	 * We are going to perform proper setup of shadow memory.
	 *
	 * At first we should unmap early shadow (clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory.
	 *
	 * To keep the early shadow memory MMU tables around while setting up
	 * the proper shadow memory, we copy swapper_pg_dir (the initial page
	 * table) to tmp_pgd_table and use that to keep the early shadow memory
	 * mapped until the full shadow setup is finished. Then we swap back
	 * to the proper swapper_pg_dir.
	 */

	memcpy(tmp_pgd_table, swapper_pg_dir, sizeof(tmp_pgd_table));
#ifdef CONFIG_ARM_LPAE
	/* We need to be in the same PGD or this won't work */
	BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) !=
		     pgd_index(KASAN_SHADOW_END));
	memcpy(tmp_pmd_table,
	       (void*)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
	       sizeof(tmp_pmd_table));
	set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)],
		__pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
#endif
	cpu_switch_mm(tmp_pgd_table, &init_mm);
	local_flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
					    kasan_mem_to_shadow((void *)VMALLOC_END));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_END),
				    kasan_mem_to_shadow((void *)-1UL) + 1);

	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = __va(pa_start);
		void *end = __va(pa_end);

		/* Do not attempt to shadow highmem */
		if (pa_start >= arm_lowmem_limit) {
			pr_info("Skip highmem block at %pa-%pa\n", &pa_start, &pa_end);
			continue;
		}
		if (pa_end > arm_lowmem_limit) {
			pr_info("Truncating shadow for memory block at %pa-%pa to lowmem region at %pa\n",
				&pa_start, &pa_end, &arm_lowmem_limit);
			end = __va(arm_lowmem_limit);
		}
		if (start >= end) {
			pr_info("Skipping invalid memory block %pa-%pa (virtual %p-%p)\n",
				&pa_start, &pa_end, start, end);
			continue;
		}

		create_mapping(start, end);
	}

	/*
	 * 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
	 *    so we need to map this area if CONFIG_KASAN_VMALLOC=n. With
	 *    VMALLOC support KASAN will manage this region dynamically,
	 *    refer to kasan_populate_vmalloc() and ARM's implementation of
	 *    module_alloc().
	 * 2. PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR
	 *    ~ MODULES_END's shadow are in the same PMD_SIZE, so we can't
	 *    use kasan_populate_zero_shadow.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && IS_ENABLED(CONFIG_MODULES))
		create_mapping((void *)MODULES_VADDR, (void *)(MODULES_END));
	create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE));

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly, so
	 * we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,
			   &kasan_early_shadow_pte[i],
			   pfn_pte(virt_to_pfn(kasan_early_shadow_page),
				   __pgprot(pgprot_val(PAGE_KERNEL)
					    | L_PTE_RDONLY)));

	cpu_switch_mm(swapper_pg_dir, &init_mm);
	local_flush_tlb_all();

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	pr_info("Kernel address sanitizer initialized\n");
	init_task.kasan_depth = 0;
}