// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

/*
 * Hook the fixmap virtual range up to a statically allocated PTE page so
 * that early_ioremap() can be used before the memory allocators are up.
 */
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

/* Allocate a naturally aligned page table from memblock during early boot. */
void __init *early_alloc_pgtable(unsigned long size)
{
	return memblock_alloc_or_panic(size, size);
}

/* Early (pre-slab) counterpart of pte_alloc_kernel(). */
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

/* Create a kernel mapping of one page: virtual address va -> physical address pa. */
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory starting at start.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	bool ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = core_kernel_text(v);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* Map all of lowmem into the kernel linear mapping. */
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

/* Remove execute permission from the init text once it is no longer needed. */
static int __mark_initmem_nx(void)
{
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);
	int err;

	err = mmu_mark_initmem_nx();

	if (!v_block_mapped((unsigned long)_sinittext)) {
		err = set_memory_nx((unsigned long)_sinittext, numpages);
		if (err)
			return err;
		err = set_memory_rw((unsigned long)_sinittext, numpages);
	}
	return err;
}

void mark_initmem_nx(void)
{
	int err = __mark_initmem_nx();

	if (err)
		panic("%s() failed, err = %d\n", __func__, err);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
static int __mark_rodata_ro(void)
{
	unsigned long numpages;

	if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
		pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");

	if (v_block_mapped((unsigned long)_stext + 1))
		return mmu_mark_rodata_ro();

	/*
	 * mark text and rodata as read only. __end_rodata is set by
	 * powerpc's linker script and includes tables and data
	 * requiring relocation which are not put in RO_DATA.
	 */
	numpages = PFN_UP((unsigned long)__end_rodata) -
		   PFN_DOWN((unsigned long)_stext);

	return set_memory_ro((unsigned long)_stext, numpages);
}

void mark_rodata_ro(void)
{
	int err = __mark_rodata_ro();

	if (err)
		panic("%s() failed, err = %d\n", __func__, err);
}
#endif