/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
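/*
 * TCR_EL1.NFD1 disables translation table walks for EL0-initiated accesses
 * to the TTBR1 (kernel) VA range, making it harder for userspace to probe
 * the randomized kernel layout via faulting accesses.
 */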
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS 0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS 0
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1: OSLSR_EL1 itself is
	 * read-only, so the saved OS Lock state (OSLSR_EL1.OSLK, bit 1)
	 * has to be re-established through a write to OSLAR_EL1.
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
#endif

	.pushsection ".idmap.text", "a"

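/*
 * Point TTBR1_EL1 at the empty reserved_pg_dir and invalidate the TLB, so
 * that the previously active kernel page tables can be repointed or
 * modified without risking conflicting TLB entries.
 */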
.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

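/*
 * Page attributes for the temporary fixmap mappings created by
 * kpti_map_pgtbl below, while each page table is being rewritten.
 */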
#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \
				 PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE)

	.pushsection ".idmap.text", "a"

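	/*
	 * Extract the physical address from a table entry: the low bits come
	 * from PTE_ADDR_LOW and, with 52-bit PAs, the top bits are stored in
	 * the PTE_ADDR_HIGH field and have to be shifted back into place.
	 */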
	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #PTE_ADDR_LOW
#ifdef CONFIG_ARM64_PA_BITS_52
	and	\pte, \pte, #PTE_ADDR_HIGH
	orr	\phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
#endif
	.endm

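	/*
	 * Walk all \num_entries entries at cur_\type\()p and set the nG bit
	 * on every valid, global entry. At the table levels, a table
	 * descriptor (bit 1 set) is descended into via .Lderef_\type so the
	 * next level gets the same treatment.
	 */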
	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p], #8	// Load the entry and advance
	tbz	\type, #0, .Lnext_\type		// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p, #-8]	// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level. Level N uses page
	 * (N + 2) of the fixmap window, installed via entry (N + 2) of the
	 * PTE array at temp_pte.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 2)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 2)
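	/* TLBI by VA takes VA[55:12] of the target address, hence the shift */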
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 2)]
	dsb	nshst
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
	.pushsection	".data", "aw", %progbits
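/*
 * Rendezvous counter: it starts at 1 to account for the boot CPU, each
 * secondary increments it on entry, and the boot CPU spins until it equals
 * the CPU count in num_cpus before rewriting swapper. Writing 0 back
 * releases the secondaries again.
 */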
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection

SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17
	cur_p4dp	.req	x19
	end_p4dp	.req	x20

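	/*
	 * Note the aliasing above: cpu/temp_pte share x0, num_cpus/pte_flags
	 * share x1, and swapper_ttb reuses x3, which is why the temp_pte_va
	 * argument (x3) is stashed in x5 before swapper_ttb is loaded.
	 */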
	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

#if CONFIG_PGTABLE_LEVELS > 4
	stp	x29, x30, [sp, #-32]!
	mov	x29, sp
	stp	x19, x20, [sp, #16]
#endif

	/* We're the boot CPU. Wait for the others to catch up */
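	/* sevl primes the event register so the first wfe falls through */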
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov_q	pte_flags, KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */

#ifdef CONFIG_ARM64_LPA2
	/*
	 * If LPA2 support is configured, but 52-bit virtual addressing is not
	 * enabled at runtime, we will fall back to one level of paging less,
	 * and so we have to walk swapper_pg_dir as if we dereferenced its
	 * address from a PGD level entry, and terminate the PGD level loop
	 * right after.
	 */
	adrp	pgd, swapper_pg_dir	// walk &swapper_pg_dir at the next level
	mov	cur_pgdp, end_pgdp	// must be equal to terminate the PGD loop
alternative_if_not ARM64_HAS_VA52
	b	.Lderef_pgd		// skip to the next level
alternative_else_nop_endif
	/*
	 * LPA2 based 52-bit virtual addressing requires 52-bit physical
	 * addressing to be enabled as well. In this case, the shareability
	 * bits are repurposed as physical address bits, and should not be
	 * set in pte_flags.
	 */
	bic	pte_flags, pte_flags, #PTE_SHARED
#endif

	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, -1
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
#if CONFIG_PGTABLE_LEVELS > 4
	ldp	x19, x20, [sp, #16]
	ldp	x29, x30, [sp], #32
#endif
	ret

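	/*
	 * Folded levels below alias their register and .Lnext_* label to the
	 * enclosing level, so the same loop code works regardless of
	 * CONFIG_PGTABLE_LEVELS.
	 */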
.Lderef_pgd:
	/* P4D */
	.if		CONFIG_PGTABLE_LEVELS > 4
	p4d		.req	x30
	pte_to_phys	cur_p4dp, pgd
	kpti_map_pgtbl	p4d, 0
	kpti_mk_tbl_ng	p4d, PTRS_PER_P4D
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 4 */
	p4d		.req	pgd
	.set		.Lnext_p4d, .Lnext_pgd
	.endif

.Lderef_p4d:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, p4d
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_p4d
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid
	.unreq	cur_p4dp
	.unreq	end_p4dp
	.unreq	p4d

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "a"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	msr	cpacr_el1, xzr			// Reset cpacr_el1
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	tcr2	.req	x15
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
		     TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
		     TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
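	/*
	 * tcr2 starts out zero and collects optional feature bits (HAFT,
	 * PIE) below; TCR2_EL1 is only written if FEAT_TCR2 is implemented.
	 */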
	mov	tcr2, xzr

	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	mov		x9, #64 - VA_BITS
alternative_if ARM64_HAS_VA52
	tcr_set_t1sz	tcr, x9
#ifdef CONFIG_ARM64_LPA2
	orr		tcr, tcr, #TCR_DS
#endif
alternative_else_nop_endif
#endif

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	ubfx	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, #4
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
#ifdef CONFIG_ARM64_HAFT
	cmp	x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT
	b.lt	1f
	orr	tcr2, tcr2, TCR2_EL1x_HAFT
#endif /* CONFIG_ARM64_HAFT */
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr

	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_indirection

	/*
	 * The PROT_* macros describing the various memory types may resolve to
	 * C expressions if they include the PTE_MAYBE_* macros, and so they
	 * can only be used from C code. The PIE_E* constants below are also
	 * defined in terms of those macros, but will mask out those
	 * PTE_MAYBE_* constants, whether they are set or not. So #define them
	 * as 0x0 here so we can evaluate the PIE_E* constants in asm context.
	 */

#define PTE_MAYBE_NG		0
#define PTE_MAYBE_SHARED	0

	mov_q	x0, PIE_E0
	msr	REG_PIRE0_EL1, x0
	mov_q	x0, PIE_E1
	msr	REG_PIR_EL1, x0

#undef PTE_MAYBE_NG
#undef PTE_MAYBE_SHARED

	orr	tcr2, tcr2, TCR2_EL1x_PIE

.Lskip_indirection:

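	/* Only write TCR2_EL1 if FEAT_TCR2 (ID_AA64MMFR3_EL1.TCRX) is present */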
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4
	cbz	x1, 1f
	msr	REG_TCR2_EL1, tcr2
1:

	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
	.unreq	tcr2
SYM_FUNC_END(__cpu_setup)