// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <asm/fixmap.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, returns 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or fixmap
 * Return 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

/*
 * An 8M hugepage spans two PGD entries, so both entries are populated
 * with the same hugepd, pointing to a single PTE allocated from memblock.
 */
static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
	if (hpd_val(*pmdp) == 0) {
		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

		if (!ptep)
			return NULL;

		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
	}
	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}

/*
 * Install a 512k or 8M kernel huge page mapping at @va. When @new is true,
 * page tables are allocated (only legal before slab is available);
 * otherwise an existing entry is updated in place.
 */
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
					     pgprot_t prot, int psize, bool new)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep;

	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
		return -EINVAL;

	if (new) {
		if (WARN_ON(slab_is_available()))
			return -EINVAL;

		if (psize == MMU_PAGE_512K)
			ptep = early_pte_alloc_kernel(pmdp, va);
		else
			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
	} else {
		if (psize == MMU_PAGE_512K)
			ptep = pte_offset_kernel(pmdp, va);
		else
			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
	}

	if (WARN_ON(!ptep))
		return -ENOMEM;

	/* The PTE should never be already present */
	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
		return -EINVAL;

	set_huge_pte_at(&init_mm, va, ptep,
			pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize);

	return 0;
}
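
/*
 * Usage sketch (illustrative only, not a call made by this file as-is):
 * mapping the first 8M of RAM as cacheable kernel memory at early boot
 * would look like
 *
 *	__early_map_kernel_hugepage(PAGE_OFFSET, 0, PAGE_KERNEL,
 *				    MMU_PAGE_8M, true);
 *
 * mmu_mapin_ram_chunk() below wraps this pattern, walking a physical
 * range and selecting 512k or 8M pages based on alignment.
 */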

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
}

static bool immr_is_mapped __initdata;

void __init mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;

	immr_is_mapped = true;

	__early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}

/*
 * Map the [offset, top) physical range with the largest pages possible:
 * 512k pages up to the first 8M boundary, 8M pages across the middle,
 * then 512k pages for the remainder.
 */
static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
			       pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;
	int err = 0;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K)
		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M)
		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K)
		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	/* v no longer points at the start of the chunk here, so flush the
	 * full virtual range that was remapped.
	 */
	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + offset, PAGE_OFFSET + top);

	return err;
}

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
	unsigned long boundary = strict_boundary ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	WARN_ON(top < einittext8);

	mmu_mapin_immr();

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
	if (debug_pagealloc_enabled_or_kfence()) {
		top = boundary;
	} else {
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
		mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
	}

	if (top > SZ_32M)
		memblock_set_current_limit(top);

	block_mapped_ram = top;

	return top;
}

int mmu_mark_initmem_nx(void)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
	int err = 0;

	if (!debug_pagealloc_enabled_or_kfence())
		err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

	mmu_pin_tlb(block_mapped_ram, false);

	return err;
}

#ifdef CONFIG_STRICT_KERNEL_RWX
int mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);
	int err;

	err = mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);

	return err;
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}

int pud_clear_huge(pud_t *pud)
{
	return 0;
}

int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
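
/*
 * Orientation note: the entry points above are reached, roughly, through
 * the generic powerpc 32-bit paths below (this is a sketch for the reader,
 * not definitive for every kernel version):
 *
 *	MMU_init_hw()          <- MMU_init()
 *	mmu_mapin_ram()        <- MMU_init() via mapin_ram()
 *	mmu_mark_initmem_nx()  <- free_initmem() via mark_initmem_nx()
 *	mmu_mark_rodata_ro()   <- mark_rodata_ro()
 */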