// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

extern pgd_t early_pg_dir[PTRS_PER_PGD];

asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	/* Point every early shadow PTE at the shared early shadow page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	/* Make every early shadow PMD entry reference that single PTE table. */
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	/*
	 * Map the whole shadow region in early_pg_dir through the shared
	 * early shadow PMD table.
	 */
	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	/* Repeat the same mapping in swapper_pg_dir. */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	flush_tlb_all();
}

static void __init populate(void *start, void *end)
{
	unsigned long i, offset;
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);
	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
	unsigned long n_ptes =
	    ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
	unsigned long n_pmds =
	    ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;

	pte_t *pte =
	    memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	pmd_t *pmd =
	    memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	pgd_t *pgd = pgd_offset_k(vaddr);

	/* Back every shadow page in [vaddr, vend) with freshly allocated memory. */
	for (i = 0; i < n_pages; i++) {
		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

		set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
	}

	/* Link the new PTE tables into the new PMD tables... */
	for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
		set_pmd(&pmd[i],
			pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
				__pgprot(_PAGE_TABLE)));

	/* ...and the PMD tables into the kernel page table. */
	for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
		set_pgd(&pgd[i],
			pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
				__pgprot(_PAGE_TABLE)));

	flush_tlb_all();
	memset(start, 0, end - start);
}

void __init kasan_init(void)
{
	struct memblock_region *reg;
	unsigned long i;

	/*
	 * Map the shadow of [KASAN_SHADOW_START, shadow(VMALLOC_END)) to the
	 * early shadow page.
	 */
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Allocate real shadow memory for every region of physical memory. */
	for_each_memblock(memory, reg) {
		void *start = (void *)__va(reg->base);
		void *end = (void *)__va(reg->base + reg->size);

		if (start >= end)
			break;

		populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Remap the early shadow page read-only and make sure it is zeroed. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	init_task.kasan_depth = 0;
}
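
/*
 * Illustrative sketch, not part of the original file: the shadow-address
 * translation that the code above relies on. For generic KASAN,
 * KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte covers an 8-byte
 * granule of kernel memory. The helper name below is hypothetical; it
 * mirrors the generic kasan_mem_to_shadow() definition in <linux/kasan.h>
 * to show why populate() has to map roughly (region size >> 3) bytes of
 * shadow per memblock region, rounded up to whole pages, PTE tables and
 * PMD tables.
 */
static inline void *kasan_shadow_sketch(const void *addr)
{
	/* shadow = (addr >> 3) + KASAN_SHADOW_OFFSET */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}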