/* xref: /linux/arch/arm64/kernel/hyp-stub.S (revision 509d3f45847627f4c5cdce004c3ec79262b5239c) */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hypervisor stub
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author:	Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

	.text
	.pushsection	.hyp.text, "ax"

	.align 11

/*
 * Hyp-stub vector table (2KB-aligned, as installed in VBAR_EL2).
 * Only synchronous exceptions taken from EL2h or from 64-bit EL1 are
 * handled (by elx_sync); every other entry parks the CPU in a local
 * invalid_vector spin loop.
 */
SYM_CODE_START(__hyp_stub_vectors)
	ventry	el2_sync_invalid		// Synchronous EL2t
	ventry	el2_irq_invalid			// IRQ EL2t
	ventry	el2_fiq_invalid			// FIQ EL2t
	ventry	el2_error_invalid		// Error EL2t

	ventry	elx_sync			// Synchronous EL2h
	ventry	el2_irq_invalid			// IRQ EL2h
	ventry	el2_fiq_invalid			// FIQ EL2h
	ventry	el2_error_invalid		// Error EL2h

	ventry	elx_sync			// Synchronous 64-bit EL1
	ventry	el1_irq_invalid			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
	ventry	el1_irq_invalid			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
SYM_CODE_END(__hyp_stub_vectors)

	.align 11

/*
 * Synchronous exception handler for the stub: dispatch on the HVC
 * command code passed in x0.
 *
 *   HVC_SET_VECTORS:     install new EL2 vectors from x1, return 0
 *   HVC_FINALISE_EL2:    tail-branch to __finalise_el2
 *   HVC_GET_ICH_VTR_EL2: return ICH_VTR_EL2 in x1, 0 in x0
 *   HVC_SOFT_RESTART:    branch to x1 with (x2, x3, x4) as (x0, x1, x2)
 *   HVC_RESET_VECTORS:   no-op success (the stub is already in place)
 *   anything else:       return HVC_STUB_ERR in x0
 */
SYM_CODE_START_LOCAL(elx_sync)
	cmp	x0, #HVC_SET_VECTORS
	b.ne	1f
	msr	vbar_el2, x1			// x1 = address of the new vector table
	b	9f

1:	cmp	x0, #HVC_FINALISE_EL2
	b.eq	__finalise_el2

	cmp	x0, #HVC_GET_ICH_VTR_EL2
	b.ne	2f
	mrs_s	x1, SYS_ICH_VTR_EL2
	b	9f

2:	cmp	x0, #HVC_SOFT_RESTART
	b.ne	3f
	mov	x0, x2				// shuffle args down: target in x1 -> x4,
	mov	x2, x4				// (x2, x3, x4) -> (x0, x1, x2)
	mov	x4, x1
	mov	x1, x3
	br	x4				// no return

3:	cmp	x0, #HVC_RESET_VECTORS
	beq	9f				// Nothing to reset!

	/* Someone called kvm_call_hyp() against the hyp-stub... */
	mov_q	x0, HVC_STUB_ERR
	eret

9:	mov	x0, xzr				// success: return 0
	eret
SYM_CODE_END(elx_sync)

/*
 * Finalise the EL2 configuration and, if the CPU is VHE-capable (and the
 * override doesn't restrict VHE to the hypervisor only), switch to VHE.
 * Reached from elx_sync on HVC_FINALISE_EL2; leaves via eret (through
 * enter_vhe on success, with HVC_STUB_ERR in x0 on failure).
 */
SYM_CODE_START_LOCAL(__finalise_el2)
	finalise_el2_state

	// nVHE? No way! Give me the real thing!
	// Sanity check: MMU *must* be off
	mrs	x1, sctlr_el2
	tbnz	x1, #0, 1f			// SCTLR_EL2.M set -> bail out

	// Needs to be VHE capable, obviously
	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 0f 1f x1 x2

0:	// Check whether we only want the hypervisor to run VHE, not the kernel
	adr_l	x1, arm64_sw_feature_override
	ldr	x2, [x1, FTR_OVR_VAL_OFFSET]
	ldr	x1, [x1, FTR_OVR_MASK_OFFSET]
	and	x2, x2, x1			// only honour overridden bits
	ubfx	x2, x2, #ARM64_SW_FEATURE_OVERRIDE_HVHE, #4
	cbz	x2, 2f

1:	mov_q	x0, HVC_STUB_ERR		// failure: stay as we are
	eret
2:
	// Engage the VHE magic!
	mov_q	x0, HCR_HOST_VHE_FLAGS
	msr_hcr_el2 x0
	isb

	// Use the EL1 allocated stack, per-cpu offset
	mrs	x0, sp_el1
	mov	sp, x0
	mrs	x0, tpidr_el1
	msr	tpidr_el2, x0

	// FP configuration, vectors
	mrs_s	x0, SYS_CPACR_EL12
	msr	cpacr_el1, x0
	mrs_s	x0, SYS_VBAR_EL12
	msr	vbar_el1, x0

	// Use EL2 translations for SPE & TRBE and disable access from EL1
	mrs	x0, mdcr_el2
	bic	x0, x0, #MDCR_EL2_E2PB_MASK
	bic	x0, x0, #MDCR_EL2_E2TB_MASK
	msr	mdcr_el2, x0

	// Transfer the MM state from EL1 to EL2
	mrs_s	x0, SYS_TCR_EL12
	msr	tcr_el1, x0
	mrs_s	x0, SYS_TTBR0_EL12
	msr	ttbr0_el1, x0
	mrs_s	x0, SYS_TTBR1_EL12
	msr	ttbr1_el1, x0
	mrs_s	x0, SYS_MAIR_EL12
	msr	mair_el1, x0
	mrs	x1, REG_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4
	cbz	x1, .Lskip_tcr2			// TCRX field zero: no TCR2 to transfer
	mrs	x0, REG_TCR2_EL12
	msr	REG_TCR2_EL1, x0

	// Transfer permission indirection state
	mrs	x1, REG_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_indirection		// S1PIE field zero: no PIR regs to transfer
	mrs	x0, REG_PIRE0_EL12
	msr	REG_PIRE0_EL1, x0
	mrs	x0, REG_PIR_EL12
	msr	REG_PIR_EL1, x0

.Lskip_indirection:
.Lskip_tcr2:

	isb

	// Hack the exception return to stay at EL2
	mrs	x0, spsr_el1
	and	x0, x0, #~PSR_MODE_MASK
	mov	x1, #PSR_MODE_EL2h
	orr	x0, x0, x1
	msr	spsr_el1, x0

	b	enter_vhe
SYM_CODE_END(__finalise_el2)
164
165	// At the point where we reach enter_vhe(), we run with
166	// the MMU off (which is enforced by __finalise_el2()).
167	// We thus need to be in the idmap, or everything will
168	// explode when enabling the MMU.
169
170	.pushsection	.idmap.text, "ax"
171
172SYM_CODE_START_LOCAL(enter_vhe)
173	// Invalidate TLBs before enabling the MMU
174	tlbi	vmalle1
175	dsb	nsh
176	isb
177
178	// Enable the EL2 S1 MMU, as set up from EL1
179	mrs_s	x0, SYS_SCTLR_EL12
180	set_sctlr_el1	x0
181
182	// Disable the EL1 S1 MMU for a good measure
183	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
184	msr_s	SYS_SCTLR_EL12, x0
185
186	mov	x0, xzr
187
188	eret
189SYM_CODE_END(enter_vhe)
190
191	.popsection

/*
 * Emit a local vector entry that simply spins: any exception routed
 * here is unexpected while the stub is installed.
 */
.macro invalid_vector	label
SYM_CODE_START_LOCAL(\label)
	b \label				// park the CPU in a tight loop
SYM_CODE_END(\label)
.endm
198
199	invalid_vector	el2_sync_invalid
200	invalid_vector	el2_irq_invalid
201	invalid_vector	el2_fiq_invalid
202	invalid_vector	el2_error_invalid
203	invalid_vector	el1_sync_invalid
204	invalid_vector	el1_irq_invalid
205	invalid_vector	el1_fiq_invalid
206	invalid_vector	el1_error_invalid
207
208	.popsection

/*
 * __hyp_set_vectors: Call this after boot to set the initial hypervisor
 * vectors as part of hypervisor installation.  On an SMP system, this should
 * be called on each CPU.
 *
 * x0 must be the physical address of the new vector table, and must be
 * 2KB aligned.
 *
 * Before calling this, you must check that the stub hypervisor is installed
 * everywhere, by waiting for any secondary CPUs to be brought up and then
 * checking that is_hyp_mode_available() is true.
 *
 * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
 * something else went wrong... in such cases, trying to install a new
 * hypervisor is unlikely to work as desired.
 *
 * When you call into your shiny new hypervisor, sp_el2 will contain junk,
 * so you will need to set that to something sensible at the new hypervisor's
 * initialisation entry point.
 */

SYM_FUNC_START(__hyp_set_vectors)
	mov	x1, x0				// vector table address is the HVC argument
	mov	x0, #HVC_SET_VECTORS
	hvc	#0
	ret
SYM_FUNC_END(__hyp_set_vectors)

/*
 * __hyp_reset_vectors: issue HVC_RESET_VECTORS to whatever runs at EL2
 * (the stub itself treats it as a no-op success, returning 0 in x0).
 */
SYM_FUNC_START(__hyp_reset_vectors)
	mov	x0, #HVC_RESET_VECTORS
	hvc	#0
	ret
SYM_FUNC_END(__hyp_reset_vectors)

/*
 * Entry point to finalise EL2 and switch to VHE if deemed capable
 *
 * w0: boot mode, as returned by init_kernel_el()
 */
SYM_FUNC_START(finalise_el2)
	// Need to have booted at EL2
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f

	// and still be at EL1
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL1
	b.ne	1f

	// The hvc traps to elx_sync, which hands off to __finalise_el2
	mov	x0, #HVC_FINALISE_EL2
	hvc	#0
1:
	ret
SYM_FUNC_END(finalise_el2)
