xref: /linux/arch/arm64/mm/fixmap.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fixmap manipulation code
 */

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/* ensure that the fixmap region does not grow down into the PCI I/O region */
static_assert(FIXADDR_TOT_START > PCI_IO_END);

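/*
 * Number of statically allocated PTE and PMD tables needed to cover the
 * fixmap region: one PTE table per PMD-sized span, one PMD table per
 * PUD-sized span.
 */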
#define NR_BM_PTE_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
#define NR_BM_PMD_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)

static_assert(NR_BM_PMD_TABLES == 1);

#define __BM_TABLE_IDX(addr, shift) \
	(((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))

#define BM_PTE_TABLE_IDX(addr)	__BM_TABLE_IDX(addr, PMD_SHIFT)

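/*
 * Statically allocated page tables backing the fixmap. bm_pmd and bm_pud are
 * only referenced on configurations with enough page table levels, hence
 * __maybe_unused.
 */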
static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

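/* Return the fixmap PTE entry for @addr within the bm_pte tables. */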
static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
}

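/*
 * Hook the statically allocated PTE table for @addr into @pmdp if the PMD
 * entry has not been populated yet.
 */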
static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
{
	pmd_t pmd = READ_ONCE(*pmdp);
	pte_t *ptep;

	if (pmd_none(pmd)) {
		ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
		__pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE);
	}
}

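/*
 * Populate @pudp with bm_pmd if necessary, then wire up a PTE table for each
 * PMD entry spanning [addr, end).
 */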
static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
					 unsigned long end)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);
	pmd_t *pmdp;

	if (pud_none(pud))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);

	pmdp = pmd_offset_kimg(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		early_fixmap_init_pte(pmdp, addr);
	} while (pmdp++, addr = next, addr != end);
}

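/*
 * Populate @p4dp with bm_pud if necessary, then initialise the PMD level for
 * the fixmap span [addr, end).
 */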
static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
					 unsigned long end)
{
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp;

	if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
	    p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
	}

	if (p4d_none(p4d))
		__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);

	pudp = pud_offset_kimg(p4dp, addr);
	early_fixmap_init_pmd(pudp, addr, end);
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	unsigned long addr = FIXADDR_TOT_START;
	unsigned long end = FIXADDR_TOP;

	pgd_t *pgdp = pgd_offset_k(addr);
	p4d_t *p4dp = p4d_offset_kimg(pgdp, addr);

	early_fixmap_init_pud(p4dp, addr, end);
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		__set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		__pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

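/*
 * Map the FDT at @dt_phys through the FIX_FDT fixmap slot. Returns the FDT's
 * virtual address and stores its total size in @size, or returns NULL if the
 * FDT address is missing or misaligned, the magic does not match, or the FDT
 * is larger than MAX_FDT_SIZE.
 */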
void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	phys_addr_t dt_phys_base;
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. We rely on MIN_FDT_ALIGN being at least
	 * 8 bytes so that we can always access the magic and size fields of
	 * the FDT header after mapping the first chunk; double check here
	 * that this is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	dt_phys_base = round_down(dt_phys, PAGE_SIZE);
	offset = dt_phys % PAGE_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > PAGE_SIZE) {
		create_mapping_noalloc(dt_phys_base, dt_virt_base,
				       offset + *size, prot);
	}

	return dt_virt;
}