xref: /linux/arch/arm64/kernel/pi/map_kernel.c (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2023 Google LLC
// Author: Ard Biesheuvel <ardb@google.com>

#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/string.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "pi.h"

extern const u8 __eh_frame_start[], __eh_frame_end[];

extern void idmap_cpu_replace_ttbr1(void *pgdir);

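/*
 * Note: map_range() resolves addresses from @root_level downwards, so the
 * TTBR1 sign-extension bits of the kernel VAs are masked off here to turn
 * them into offsets from the start of the TTBR1 address space. @pgd points
 * at the next free page that map_range() may carve page tables from.
 */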
static void __init map_segment(pgd_t *pg_dir, u64 *pgd, u64 va_offset,
			       void *start, void *end, pgprot_t prot,
			       bool may_use_cont, int root_level)
{
	map_range(pgd, ((u64)start + va_offset) & ~PAGE_OFFSET,
		  ((u64)end + va_offset) & ~PAGE_OFFSET, (u64)start,
		  prot, root_level, (pte_t *)pg_dir, may_use_cont, 0);
}

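/*
 * Note: mapping a range with __pgprot(0) writes descriptors with the valid
 * bit clear, so this removes the existing mapping rather than creating one.
 */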
static void __init unmap_segment(pgd_t *pg_dir, u64 va_offset, void *start,
				 void *end, int root_level)
{
	map_segment(pg_dir, NULL, va_offset, start, end, __pgprot(0),
		    false, root_level);
}

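/*
 * Populate init_pg_dir with the kernel's mappings and make it live. When
 * the text has to be written to first (relocation for KASLR or shadow call
 * stack patching), it is mapped writable on a first pass and remapped with
 * its final permissions afterwards.
 */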
static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
{
	bool enable_scs = IS_ENABLED(CONFIG_UNWIND_PATCH_PAC_INTO_SCS);
	bool twopass = IS_ENABLED(CONFIG_RELOCATABLE);
	u64 pgdp = (u64)init_pg_dir + PAGE_SIZE;
	pgprot_t text_prot = PAGE_KERNEL_ROX;
	pgprot_t data_prot = PAGE_KERNEL;
	pgprot_t prot;

	/*
	 * External debuggers may need to write directly to the text mapping to
	 * install SW breakpoints. Allow this (only) when explicitly requested
	 * with rodata=off.
	 */
	if (arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF))
		text_prot = PAGE_KERNEL_EXEC;

	/*
	 * We only enable the shadow call stack dynamically if we are running
	 * on a system that does not implement PAC or BTI. PAC and SCS provide
	 * roughly the same level of protection, and BTI relies on the PACIASP
	 * instructions serving as landing pads, preventing us from patching
	 * those instructions into something else.
	 */
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && cpu_has_pac())
		enable_scs = false;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && cpu_has_bti()) {
		enable_scs = false;

		/*
		 * If we have a CPU that supports BTI and a kernel built for
		 * BTI, mark the kernel executable text as guarded pages now
		 * so we don't have to rewrite the page tables later.
		 */
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
	}

	/* Map all code read-write on the first pass if needed */
	twopass |= enable_scs;
	prot = twopass ? data_prot : text_prot;

	map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot,
		    !twopass, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata,
		    __inittext_begin, data_prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __inittext_begin,
		    __inittext_end, prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __initdata_begin,
		    __initdata_end, data_prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, _data, _end, data_prot,
		    true, root_level);
	dsb(ishst);

	idmap_cpu_replace_ttbr1(init_pg_dir);
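
	/*
	 * init_pg_dir is live from here on: the image is now accessible at
	 * its randomized virtual address, and still writable wherever a
	 * second pass is needed.
	 */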
	if (twopass) {
		if (IS_ENABLED(CONFIG_RELOCATABLE))
			relocate_kernel(kaslr_offset);

		if (enable_scs) {
			scs_patch(__eh_frame_start + va_offset,
				  __eh_frame_end - __eh_frame_start);
			asm("ic ialluis");	/* discard stale copies of the patched text */

			dynamic_scs_is_enabled = true;
		}

		/*
		 * Unmap the text region before remapping it, to avoid
		 * potential TLB conflicts when creating the contiguous
		 * descriptors.
		 */
		unmap_segment(init_pg_dir, va_offset, _stext, _etext,
			      root_level);
		dsb(ishst);
		isb();
		__tlbi(vmalle1);
		isb();

		/*
		 * Remap these segments with different permissions; no new
		 * page table allocations should be needed.
		 */
		map_segment(init_pg_dir, NULL, va_offset, _stext, _etext,
			    text_prot, true, root_level);
		map_segment(init_pg_dir, NULL, va_offset, __inittext_begin,
			    __inittext_end, text_prot, false, root_level);
	}

	/* Copy the root page table to its final location */
	memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PAGE_SIZE);
	dsb(ishst);
	idmap_cpu_replace_ttbr1(swapper_pg_dir);
}

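/*
 * Switch TTBR0 to @ttbr with TCR_EL1.DS set. Changing TCR fields while
 * translations may be cached is unsafe, so the MMU is disabled around the
 * update and the TLB invalidated before it is re-enabled. This runs from
 * .idmap.text so instruction fetches keep working while the MMU is off:
 * physical addresses there equal the ID map's virtual ones.
 */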
static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
{
	u64 sctlr = read_sysreg(sctlr_el1);
	u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
	u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
							   ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	tcr &= ~TCR_IPS_MASK;
	tcr |= parange << TCR_IPS_SHIFT;

	asm("	msr	sctlr_el1, %0		;"
	    "	isb				;"
	    "	msr	ttbr0_el1, %1		;"
	    "	msr	tcr_el1, %2		;"
	    "	isb				;"
	    "	tlbi	vmalle1			;"
	    "	dsb	nsh			;"
	    "	isb				;"
	    "	msr	sctlr_el1, %3		;"
	    "	isb				;"
	    ::	"r"(sctlr & ~SCTLR_ELx_M), "r"(ttbr), "r"(tcr), "r"(sctlr));
}

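/*
 * With TCR.DS set, descriptor bits [9:8] are no longer SH[1:0] but output
 * address bits [51:50], so descriptors carrying PTE_SHARED would suddenly
 * point at different physical addresses. The ID map therefore has to be
 * rewritten without those bits before the switch.
 */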
static void __init remap_idmap_for_lpa2(void)
{
	/* clear the bits that change meaning once LPA2 is turned on */
	pteval_t mask = PTE_SHARED;

	/*
	 * We have to clear bits [9:8] in all block or page descriptors in the
	 * initial ID map, as otherwise they will be (mis)interpreted as
	 * physical address bits once we flick the LPA2 switch (TCR.DS). Since
	 * we cannot manipulate live descriptors in that way without creating
	 * potential TLB conflicts, let's create another temporary ID map in
	 * an LPA2-compatible fashion, and update the initial ID map while
	 * running from that.
	 */
	create_init_idmap(init_pg_dir, mask);
	dsb(ishst);
	set_ttbr0_for_lpa2((u64)init_pg_dir);

	/*
	 * Recreate the initial ID map with the same granularity as before.
	 * Don't bother with the FDT; we no longer need it after this.
	 */
	memset(init_idmap_pg_dir, 0,
	       (u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);

	create_init_idmap(init_idmap_pg_dir, mask);
	dsb(ishst);

	/* switch back to the updated initial ID map */
	set_ttbr0_for_lpa2((u64)init_idmap_pg_dir);

	/* wipe the temporary ID map from memory */
	memset(init_pg_dir, 0, (u64)init_pg_end - (u64)init_pg_dir);
}

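/*
 * Create an ID mapping for the FDT so it can be parsed before the kernel
 * mapping exists. The page tables are carved from a static __initdata
 * buffer, as no memory allocator is available this early.
 */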
static void __init map_fdt(u64 fdt)
{
	static u8 ptes[INIT_IDMAP_FDT_SIZE] __initdata __aligned(PAGE_SIZE);
	u64 efdt = fdt + MAX_FDT_SIZE;
	u64 ptep = (u64)ptes;

	/*
	 * Map up to MAX_FDT_SIZE bytes, but avoid overlap with
	 * the kernel image.
	 */
	map_range(&ptep, fdt, (u64)_text > fdt ? min((u64)_text, efdt) : efdt,
		  fdt, PAGE_KERNEL, IDMAP_ROOT_LEVEL,
		  (pte_t *)init_idmap_pg_dir, false, 0);
	dsb(ishst);
}

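/*
 * C entry point of the early boot code: executes via the initial ID map
 * with the MMU on, before the kernel's virtual mapping exists. @boot_status
 * and @fdt are handed over from the assembly boot path.
 */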
asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
{
	static char const chosen_str[] __initconst = "/chosen";
	u64 va_base, pa_base = (u64)&_text;
	u64 kaslr_offset = pa_base % MIN_KIMG_ALIGN;
	int root_level = 4 - CONFIG_PGTABLE_LEVELS;
	int va_bits = VA_BITS;
	int chosen;

	map_fdt((u64)fdt);

	/* Clear BSS and the initial page tables */
	memset(__bss_start, 0, (u64)init_pg_end - (u64)__bss_start);

	/* Parse the command line for CPU feature overrides */
	chosen = fdt_path_offset(fdt, chosen_str);
	init_feature_override(boot_status, fdt, chosen);

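	/*
	 * A VA space wider than VA_BITS_MIN needs CPU support: FEAT_LVA for
	 * 64k pages, FEAT_LPA2 for 4k and 16k pages. If the feature is
	 * absent, fall back to the minimum VA size; without LPA2 this also
	 * means one page table level less, which root_level accounts for.
	 */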
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && !cpu_has_lva()) {
		va_bits = VA_BITS_MIN;
	} else if (IS_ENABLED(CONFIG_ARM64_LPA2) && !cpu_has_lpa2()) {
		va_bits = VA_BITS_MIN;
		root_level++;
	}

	if (va_bits > VA_BITS_MIN)
		sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits));

	/*
	 * The virtual KASLR displacement modulo 2 MiB is decided by the
	 * physical placement of the image, as otherwise we might not be able
	 * to create the early kernel mapping using 2 MiB block descriptors. So
	 * take the low bits of the KASLR offset from the physical address, and
	 * fill in the high bits from the seed.
	 */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		u64 kaslr_seed = kaslr_early_init(fdt, chosen);

		if (kaslr_seed && kaslr_requires_kpti())
			arm64_use_ng_mappings = true;

		kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);
	}

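	/*
	 * When 52-bit VAs are in use with 4k or 16k pages, TCR.DS must be
	 * set, and the initial ID map has to be rewritten first so its
	 * descriptors remain valid under the LPA2 format.
	 */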
	if (IS_ENABLED(CONFIG_ARM64_LPA2) && va_bits > VA_BITS_MIN)
		remap_idmap_for_lpa2();

	va_base = KIMAGE_VADDR + kaslr_offset;
	map_kernel(kaslr_offset, va_base - pa_base, root_level);
}