xref: /linux/arch/arm64/mm/fixmap.c (revision b97547761b02cc95e0e6be827dc9ca9da8142761)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fixmap manipulation code
 */

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

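/*
 * Bootstrap page tables for the fixmap, statically allocated in .bss so
 * that the fixmap can be wired up before the page allocator is available.
 * bm_pmd and bm_pud are only consumed on configurations with enough
 * translation table levels, hence __maybe_unused.
 */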
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	BUG_ON(p4d_none(p4d) || p4d_bad(p4d));

	return pud_offset_kimg(p4dp, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
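/*
 * An illustrative sketch (not a call sequence from this file): the helpers
 * above resolve a fixmap virtual address level by level, while the pte
 * level is always backed by bm_pte directly:
 *
 *	unsigned long addr = __fix_to_virt(idx);
 *	pmd_t *pmdp = fixmap_pmd(addr);	// walks pgd -> p4d -> pud -> pmd
 *	pte_t *ptep = fixmap_pte(addr);	// == &bm_pte[pte_index(addr)]
 */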

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be
 * used directly on kernel symbols (bm_p*d). This function is called too
 * early to use lm_alias, so the __p*d_populate functions must be used to
 * populate with the physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgdp;
	p4d_t *p4dp, p4d;
	pud_t *pudp;
	pmd_t *pmdp;
	unsigned long addr = FIXADDR_TOT_START;

	pgdp = pgd_offset_k(addr);
	p4dp = p4d_offset(pgdp, addr);
	p4d = READ_ONCE(*p4dp);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4-level configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pudp = pud_offset_kimg(p4dp, addr);
	} else {
		if (p4d_none(p4d))
			__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
		pudp = fixmap_pud(addr);
	}
	if (pud_none(READ_ONCE(*pudp)))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
	pmdp = fixmap_pmd(addr);
	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmdp != fixmap_pmd(__fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmdp != fixmap_pmd(__fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmdp %p != %p, %p\n",
			pmdp, fixmap_pmd(__fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(__fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			__fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			__fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}
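/*
 * For illustration (a sketch, not code from this file): the comment above
 * early_fixmap_init() boils down to the difference between
 *
 *	p4d_populate(&init_mm, p4dp, pudp);	// does virt_to_phys(pudp)
 *	__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
 *
 * virt_to_phys() is only valid for linear map addresses, whereas bm_pud
 * is a kernel image symbol, so its physical address has to come from
 * __pa_symbol() instead.
 */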

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq), so if
 * we ever need to use IPIs for TLB broadcasting, then we're in trouble
 * here.
 */
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}
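/*
 * A minimal usage sketch, assuming the generic wrappers from
 * <asm-generic/fixmap.h> (set_fixmap_io() and clear_fixmap() both funnel
 * into __set_fixmap()); uart_phys is a placeholder for a device's
 * physical address:
 *
 *	set_fixmap_io(FIX_EARLYCON_MEM_BASE, uart_phys & PAGE_MASK);
 *	base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
 *	...
 *	clear_fixmap(FIX_EARLYCON_MEM_BASE);
 *
 * clear_fixmap() passes an empty pgprot, i.e. pgprot_val(flags) == 0,
 * which is how the pte_clear() + TLB flush path above is reached.
 */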

void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN being
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double-check
	 * here that this is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT, so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}
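/*
 * A worked example of the arithmetic above, with illustrative numbers and
 * assuming a 4k-page configuration where SWAPPER_BLOCK_SIZE is 2M:
 *
 *	dt_phys   = 0x40001000
 *	offset    = dt_phys % SWAPPER_BLOCK_SIZE            = 0x1000
 *	phys base = round_down(dt_phys, SWAPPER_BLOCK_SIZE) = 0x40000000
 *	dt_virt   = dt_virt_base + 0x1000
 *
 * If fdt_totalsize() then reports a size that crosses the end of the
 * first block, i.e. offset + *size > SWAPPER_BLOCK_SIZE, the second
 * create_mapping_noalloc() call remaps round_up(offset + *size,
 * SWAPPER_BLOCK_SIZE) bytes so that the whole FDT is covered.
 */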

/*
 * Copy the fixmap region into a new pgdir.
 */
void __init fixmap_copy(pgd_t *pgdir)
{
	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
		/*
		 * The fixmap falls in a separate pgd from the kernel, and
		 * doesn't live in the carveout for the swapper_pg_dir. We can
		 * simply re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4-level configurations, so we can simply reuse the
		 * pud level entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}
}
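/*
 * In effect (a sketch of the decision above, not additional code):
 *
 *	pgd entry for FIXADDR_TOT_START empty in pgdir?
 *	  yes -> the fixmap has a pgd of its own: copy init_mm's pgd entry.
 *	  no, CONFIG_PGTABLE_LEVELS > 3 (16k/4 levels) -> the pgd entry is
 *	       shared with the kernel mapping: point the pud at bm_pmd via
 *	       its linear map alias instead.
 *	  no, otherwise -> unexpected: BUG().
 */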