xref: /linux/arch/arm64/kvm/hyp/nvhe/hyp-init.S (revision e7d759f31ca295d589f7420719c311870bb3166f)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

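	/*
	 * The vector table must be 2kB-aligned, as VBAR_EL2 ignores bits
	 * [10:0] of the base address; hence the .align 11.
	 */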
	.align	11

SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	b	.

	/*
	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
	 *
	 * x0: SMCCC function ID
	 * x1: struct kvm_nvhe_init_params PA
	 */
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc
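
	/*
	 * Not a stub call: strip the SMCCC hint bits and check that this
	 * really is the init hypercall, as nothing else is handled this
	 * early.
	 */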

	bic	x0, x0, #ARM_SMCCC_CALL_HINTS
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:	mov	x0, x1
	mov	x3, lr
	bl	___kvm_hyp_init			// Clobbers x0..x2
	mov	lr, x3

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)
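
/*
 * For reference, the host issues this hypercall roughly as below (a
 * simplified sketch of the call site in arch/arm64/kvm/arm.c; error
 * handling omitted):
 *
 *	struct kvm_nvhe_init_params *params;
 *	struct arm_smccc_res res;
 *
 *	params = this_cpu_ptr_nvhe_sym(kvm_init_params);
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
 *			  virt_to_phys(params), &res);
 *	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
 */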

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
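
/*
 * The NVHE_INIT_* offsets used below are generated by asm-offsets from
 * struct kvm_nvhe_init_params (asm/kvm_asm.h), which looks roughly like
 * this (a sketch; the field order is illustrative, the generated
 * offsets are what matter):
 *
 *	struct kvm_nvhe_init_params {
 *		unsigned long mair_el2;
 *		unsigned long tcr_el2;
 *		unsigned long tpidr_el2;
 *		unsigned long stack_hyp_va;
 *		phys_addr_t pgd_pa;
 *		unsigned long hcr_el2;
 *		unsigned long vttbr;
 *		unsigned long vtcr;
 *	};
 */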
SYM_CODE_START_LOCAL(___kvm_hyp_init)
	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr	hcr_el2, x1

	mov	x2, #HCR_E2H
	and	x2, x1, x2
	cbz	x2, 1f

	// hVHE: Replay the EL2 setup to account for the E2H bit
	// TPIDR_EL2 is used to preserve x0 across the macro maze...
	isb
	msr	tpidr_el2, x0
	init_el2_state
	finalise_el2_state
	mrs	x0, tpidr_el2

1:
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2

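	/* Last use of the params struct: x0 may be clobbered from here */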
	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	msr	tcr_el2, x0

	isb

	/* Invalidate stale TLB entries left behind by the bootloader */
	tlbi	alle2
	tlbi	vmalls12e1
	dsb	sy

	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif

#ifdef CONFIG_ARM64_BTI_KERNEL
alternative_if ARM64_BTI
	orr	x0, x0, #SCTLR_EL2_BT
alternative_else_nop_endif
#endif /* CONFIG_ARM64_BTI_KERNEL */

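	/*
	 * Enabling the MMU here is safe: this code lives in .idmap.text,
	 * which the hyp page-tables map at a VA equal to its PA.
	 */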
	msr	sctlr_el2, x0
	isb

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
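	/* x28/x29 are callee-saved and survive the helpers used below */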
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	/* Initialize EL2 CPU state to sane values. */
	init_el2_state				// Clobbers x0..x2
	finalise_el2_state
	__init_el2_nvhe_prepare_eret

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave idmap. */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)
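
/*
 * From here the C handler, kvm_host_psci_cpu_entry() in
 * nvhe/psci-relay.c, takes over with is_cpu_on in x0; it eventually
 * ERETs back to the host at EL1 and does not return to this code.
 */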

SYM_CODE_START(__kvm_handle_stub_hvc)
	/*
	 * __kvm_handle_stub_hvc is reached from __host_hvc via a branch
	 * instruction (br), so it needs a "bti j" landing pad at its
	 * start.
	 */
	bti j
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset kvm back to the hyp stub. */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x5
alternative_else_nop_endif

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)
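
/*
 * HVC_RESET_VECTORS is used to tear KVM down to the hyp stub, while
 * HVC_SOFT_RESTART is the soft-restart path used, for instance, by
 * kexec via cpu_soft_restart().
 */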
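
/*
 * Switch to the hypervisor's private page-tables while running from
 * the idmap, with the MMU briefly disabled.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: address to return to once the new page-tables are in use
 */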
SYM_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x2, sctlr_el2
	bic	x3, x2, #SCTLR_ELx_M
	msr	sctlr_el2, x3
	isb

	tlbi	alle2

	/* Install the new pgtables */
	ldr	x3, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x4, x3
alternative_if ARM64_HAS_CNP
	orr	x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x4

	/* Set the new stack pointer */
	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x0

	/* And turn the MMU back on! */
	dsb	nsh
	isb
	set_sctlr_el2	x2
	ret	x1
SYM_FUNC_END(__pkvm_init_switch_pgd)
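
/*
 * The caller (__pkvm_init() in nvhe/setup.c) invokes this through its
 * idmap (physical) alias; a rough sketch of that call site:
 *
 *	void (*fn)(phys_addr_t params_pa, void (*finalize_fn)(void));
 *
 *	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
 *	fn(__hyp_pa(params), __pkvm_init_finalise);
 */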

	.popsection