/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/cfi_types.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

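	/* The vector table below must be 2kB-aligned: VBAR_EL2[10:0] are RES0. */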
	.align	11

SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

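	/* Unhandled exceptions during init park the CPU in this tight loop. */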
__invalid:
	b	.

	/*
	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
	 *
	 * x0: SMCCC function ID
	 * x1: struct kvm_nvhe_init_params PA
	 */
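	/*
	 * Roughly how the host reaches this path (a hedged sketch; the real
	 * call site lives in the host's cpu_init_hyp_mode() path):
	 *
	 *	struct arm_smccc_res res;
	 *
	 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
	 *			  virt_to_phys(params), &res);
	 *	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
	 */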
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

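	/*
	 * SMCCC hint bits (e.g. the SMCCC v1.3 SVE state hint) carry no
	 * function information, so strip them before matching the ID.
	 */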
	bic	x0, x0, #ARM_SMCCC_CALL_HINTS
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:	mov	x0, x1
	mov	x3, lr
	bl	___kvm_hyp_init			// Clobbers x0..x2
	mov	lr, x3

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
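	/*
	 * The stack pointer is a hyp VA here; writing SP is harmless with
	 * the MMU off, and the stack is only dereferenced once the MMU is
	 * enabled below.
	 */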
	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr	hcr_el2, x1

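	/* If the host set HCR_EL2.E2H, we are running in hVHE mode */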
	mov	x2, #HCR_E2H
	and	x2, x1, x2
	cbz	x2, 1f

	// hVHE: Replay the EL2 setup to account for the E2H bit
	// TPIDR_EL2 is used to preserve x0 across the macro maze...
	isb
	msr	tpidr_el2, x0
	init_el2_state
	finalise_el2_state
	mrs	x0, tpidr_el2

1:
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

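	/* Install the hyp page tables, advertising CnP if FEAT_TTCNP is present */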
	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2

	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	msr	tcr_el2, x0

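	/* Make the preceding sysreg writes take effect before the TLB maintenance */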
	isb

	/* Invalidate stale TLB entries inherited from the bootloader */
	tlbi	alle2
	tlbi	vmalls12e1
	dsb	sy

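	/* Enable the MMU, along with PAC and BTI at EL2 where supported */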
	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif

#ifdef CONFIG_ARM64_BTI_KERNEL
alternative_if ARM64_BTI
	orr	x0, x0, #SCTLR_EL2_BT
alternative_else_nop_endif
#endif /* CONFIG_ARM64_BTI_KERNEL */

	msr	sctlr_el2, x0
	isb

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	/* Initialize EL2 CPU state to sane values. */
	init_el2_state				// Clobbers x0..x2
	finalise_el2_state
	__init_el2_nvhe_prepare_eret

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave the idmap by branching to the C handler's hyp VA. */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)

SYM_CODE_START(__kvm_handle_stub_hvc)
	/*
	 * __kvm_handle_stub_hvc is reached from __host_hvc via an indirect
	 * branch (br), so it must start with a BTI J landing pad.
	 */
	bti j
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
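	/* Mask all exceptions and stay on SP_EL2 (EL2h) across the restart */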
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset kvm back to the hyp stub. */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

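	/* In protected mode, restore the host's baseline HCR_EL2 flags for the stub */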
alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x5
alternative_else_nop_endif

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

/*
 * void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
 *                             void (*fn)(void));
 *
 * SYM_TYPED_FUNC_START() allows C to call this ID-mapped function indirectly
 * using a physical pointer without triggering a kCFI failure.
 */
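/*
 * A hedged sketch of the hyp-side call; 'pgd', 'stack_hyp_va' and
 * 'finalise_fn' are illustrative names, not the exact call site:
 *
 *	void (*fn)(phys_addr_t, unsigned long, void (*)(void));
 *
 *	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
 *	fn(__hyp_pa(pgd), stack_hyp_va, finalise_fn);
 */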
SYM_TYPED_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x3, sctlr_el2
	bic	x4, x3, #SCTLR_ELx_M
	msr	sctlr_el2, x4
	isb

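	/* Flush stale EL2 TLB entries for the old page tables */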
	tlbi	alle2

	/* Install the new pgtables */
	phys_to_ttbr x5, x0
alternative_if ARM64_HAS_CNP
	orr	x5, x5, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x5

	/* Set the new stack pointer */
	mov	sp, x1

	/* And turn the MMU back on! */
	dsb	nsh
	isb
	set_sctlr_el2	x3
	ret	x2
SYM_FUNC_END(__pkvm_init_switch_pgd)

	.popsection