/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

	.text

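/*
 * __host_exit: entered from the host vectors with the host's x0/x1 already
 * pushed on the hyp stack. Save the host's GP registers into the per-CPU
 * host context, switch to the hyp ptrauth keys where configured, and hand
 * the exception over to handle_trap. On return, fall through to
 * __host_enter_restore_full to restore the host context and ERET back to
 * the host.
 */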
SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0

#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
b __skip_pauth_save
alternative_else_nop_endif

alternative_if ARM64_KVM_PROTECTED_MODE
	/* Save kernel ptrauth keys. */
	add x18, x29, #CPU_APIAKEYLO_EL1
	ptrauth_save_state x18, x19, x20

	/* Use hyp keys. */
	adr_this_cpu x18, kvm_hyp_ctxt, x19
	add x18, x18, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state x18, x19, x20
	isb
alternative_else_nop_endif
__skip_pauth_save:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */

	bl	handle_trap

__host_enter_restore_full:
	/* Restore kernel keys. */
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
b __skip_pauth_restore
alternative_else_nop_endif

alternative_if ARM64_KVM_PROTECTED_MODE
	add x18, x29, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state x18, x19, x20
alternative_else_nop_endif
__skip_pauth_restore:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */

	/* Restore host regs x0-x17 */
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 *				  u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	adr_l	lr, nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr	hcr_el2, x0
	isb
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-7 */
	mrs	x0, esr_el2
	mov	x4, x3
	mov	x3, x2
	hyp_pa	x3, x6
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

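/*
 * __host_hvc: the host issued an HVC. In protected mode every HVC is treated
 * as a regular host exit. Otherwise, function IDs below HVC_STUB_HCALL_NR
 * are stub hypercalls and are forwarded to __kvm_handle_stub_hvc at its
 * idmap address; anything else goes through __host_exit.
 */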
SYM_FUNC_START(__host_hvc)
	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* No stub for you, sonny Jim */
alternative_if ARM64_KVM_PROTECTED_MODE
	b	__host_exit
alternative_else_nop_endif

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there.
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	adr_l	x5, __kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
SYM_FUNC_END(__host_hvc)

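/*
 * Synchronous exceptions from EL1/EL0: decode ESR_EL2.EC and route HVC64 to
 * __host_hvc, everything else to __host_exit. The .error check below fails
 * the build if the expansion outgrows the 0x80-byte vector entry.
 */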
.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.eq	__host_hvc
	b	__host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

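/*
 * Exceptions taken from hyp (EL2) itself: check for a hyp stack overflow
 * first, switching to the per-CPU overflow stack if so; otherwise panic out
 * of any loaded guest or fall back to hyp_panic.
 */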
.macro invalid_host_el2_vect
	.align 7

	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
	 * of SP should always be 1.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp

	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic

.L__hyp_sp_overflow\@:
	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	b	hyp_panic_bad_stack
	ASM_BUG()
.endm

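/*
 * Unexpected exceptions from EL1/EL0 (IRQ, FIQ or SError): pass SPSR/ELR/PAR
 * to __hyp_do_panic with restore_host = false, as no host context has been
 * saved on this path.
 */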
.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
	invalid_host_el1_vect			// Error 64-bit EL1/EL0

	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
	invalid_host_el1_vect			// Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0

	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)

/*
 * kvm_host_psci_cpu_entry is called through a br instruction, which requires
 * a bti j landing pad, as compilers (gcc and llvm) don't insert bti j for
 * external functions, only bti c.
 */
SYM_CODE_START(kvm_host_psci_cpu_entry)
	bti j
	b	__kvm_host_psci_cpu_entry
SYM_CODE_END(kvm_host_psci_cpu_entry)