// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fixmap manipulation code
 */

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/* ensure that the fixmap region does not grow down into the PCI I/O region */
static_assert(FIXADDR_TOT_START > PCI_IO_END);

/*
 * Number of statically allocated boot-time ("bm") PTE/PMD tables needed to
 * cover the entire fixmap virtual range at each level.
 */
#define NR_BM_PTE_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
#define NR_BM_PMD_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)

/* The whole fixmap range must fit inside a single PUD entry (one bm_pmd). */
static_assert(NR_BM_PMD_TABLES == 1);

/*
 * Index of the bm_* table covering @addr at the level selected by @shift,
 * relative to the table covering the start of the fixmap range.
 */
#define __BM_TABLE_IDX(addr, shift) \
	(((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))

#define BM_PTE_TABLE_IDX(addr)	__BM_TABLE_IDX(addr, PMD_SHIFT)

/* Statically allocated page tables backing the fixmap region. */
static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

/*
 * Return the PTE slot in the static bm_pte tables for @addr.
 * @addr must lie within the fixmap range — no bounds check is done here.
 */
static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
}

/*
 * Hook the statically allocated PTE table covering @addr into *@pmdp,
 * if that PMD entry is still empty. Uses __pmd_populate/__pa_symbol
 * because this runs too early for virt_to_phys on kernel symbols
 * (see comment above early_fixmap_init()).
 */
static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
{
	pmd_t pmd = READ_ONCE(*pmdp);
	pte_t *ptep;

	if (pmd_none(pmd)) {
		ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
		__pmd_populate(pmdp, __pa_symbol(ptep),
			       PMD_TYPE_TABLE | PMD_TABLE_AF);
	}
}

/*
 * Populate the PMD entries spanning [@addr, @end) with the static PTE
 * tables, first hooking bm_pmd into *@pudp if that entry is empty.
 */
static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
					 unsigned long end)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);
	pmd_t *pmdp;

	if (pud_none(pud))
		__pud_populate(pudp, __pa_symbol(bm_pmd),
			       PUD_TYPE_TABLE | PUD_TABLE_AF);

	pmdp = pmd_offset_kimg(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		early_fixmap_init_pte(pmdp, addr);
	} while (pmdp++, addr = next, addr != end);
}

/*
 * Hook bm_pud into *@p4dp (if empty) and recurse down to the PMD level.
 * BUGs out if the P4D entry is already populated with something other
 * than bm_pud on a configuration where that should be impossible.
 */
static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
					 unsigned long end)
{
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp;

	if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
	    p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
	}

	if (p4d_none(p4d))
		__p4d_populate(p4dp, __pa_symbol(bm_pud),
			       P4D_TYPE_TABLE | P4D_TABLE_AF);

	pudp = pud_offset_kimg(p4dp, addr);
	early_fixmap_init_pmd(pudp, addr, end);
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	unsigned long addr = FIXADDR_TOT_START;
	unsigned long end = FIXADDR_TOP;

	pgd_t *pgdp = pgd_offset_k(addr);
	p4d_t *p4dp = p4d_offset_kimg(pgdp, addr);

	early_fixmap_init_pud(p4dp, addr, end);
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	/* Reject the hole slot and anything past the last defined index. */
	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		/* Non-empty prot: install a mapping of @phys at this slot. */
		__set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		/* Empty prot: tear down, then invalidate stale TLB entries. */
		__pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

/*
 * Map the flattened device tree at the FIX_FDT fixmap slot and return a
 * virtual pointer to it, storing the blob's total size in *@size.
 *
 * Returns NULL if @dt_phys is zero or insufficiently aligned, if the mapped
 * blob does not carry the FDT magic, or if its size exceeds MAX_FDT_SIZE.
 *
 * The mapping is done in two steps: one page first, just enough to read the
 * header's magic and totalsize fields, then (if needed) a second call to
 * cover the full blob.
 */
void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	phys_addr_t dt_phys_base;
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/* The blob need not be page-aligned; map from the enclosing page. */
	dt_phys_base = round_down(dt_phys, PAGE_SIZE);
	offset = dt_phys % PAGE_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	/* Extend the mapping if the blob spills past the first page. */
	if (offset + *size > PAGE_SIZE) {
		create_mapping_noalloc(dt_phys_base, dt_virt_base,
				       offset + *size, prot);
	}

	return dt_virt;
}