/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * Point every PTE in [addr, end) at the common kasan_zero_page,
 * stopping early once the next entry is already populated.
 */
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);

	pte = pte_offset_kernel(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
					PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_early_pmd_populate(pud_t *pud,
					unsigned long addr,
					unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud))
		pud_populate(&init_mm, pud, kasan_zero_pmd);

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		kasan_early_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_early_pud_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd))
		pgd_populate(&init_mm, pgd, kasan_zero_pud);

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		kasan_early_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}

/*
 * Back the whole shadow region with the single zero page so that
 * instrumented code can run before the real shadow is populated.
 */
static void __init kasan_map_early_shadow(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_map_early_shadow();
}
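
/*
 * A worked example of the arithmetic behind the first BUILD_BUG_ON
 * above (a sketch, assuming the generic KASAN scale shift of 3, i.e.
 * one shadow byte per 8 bytes of memory):
 *
 *	shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET
 *
 * The shadow of the very top of the 64-bit address space is then
 *
 *	shadow(1 << 64) = (1 << 61) + KASAN_SHADOW_OFFSET
 *
 * and for that to land exactly at KASAN_SHADOW_END the offset must be
 * KASAN_SHADOW_END - (1UL << 61), which is what the check enforces.
 */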

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it is a no-op on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

static void __init cpu_set_ttbr1(unsigned long ttbr1)
{
	asm(
	"	msr	ttbr1_el1, %0\n"
	"	isb"
	:
	: "r" (ttbr1));
}

void __init kasan_init(void)
{
	struct memblock_region *reg;
	int i;

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code cannot execute without shadow memory,
	 * so tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	cpu_set_ttbr1(__pa(tmp_pg_dir));
	flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)MODULES_VADDR));

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes
		 * in advance to slightly speed up the fast path. In some rare
		 * cases we could cross the boundary of the mapped shadow, so
		 * we just map some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				(unsigned long)kasan_mem_to_shadow(end) + 1,
				pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_set_ttbr1(__pa(swapper_pg_dir));
	flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages. */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
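
/*
 * For reference, kasan_mem_to_shadow() used throughout kasan_init() is
 * the generic helper from <linux/kasan.h>; a sketch of its definition:
 *
 *	static inline void *kasan_mem_to_shadow(const void *addr)
 *	{
 *		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *			+ KASAN_SHADOW_OFFSET;
 *	}
 *
 * so populating the shadow of [start, end) above touches one shadow
 * byte for every 8 bytes of the underlying memblock region.
 */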