xref: /linux/arch/arm64/kvm/hyp/nvhe/host.S (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text

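/*
 * Save the host's GP registers into its per-CPU kvm_cpu_context, hand the
 * exception over to handle_trap(), then restore the context and ERET back
 * to the host. On entry, x0 and x1 have already been pushed onto the hyp
 * stack by the vector that routed us here.
 */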
SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
	bl	handle_trap

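	/*
	 * handle_trap() may have written hypercall return values into the
	 * saved context, so the full register file is reloaded from it below.
	 */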
	/* Restore host regs x0-x17 */
__host_enter_restore_full:
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 * 				  u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	adr_l	lr, nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr
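	/* ERET will land in nvhe_hyp_panic_handler at EL1h with DAIF masked. */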

	mov	x29, x0

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr	hcr_el2, x0
	isb
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-7 */
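	/*
	 *   x0 = ESR_EL2,  x1 = SPSR_EL2,      x2 = ELR_EL2,  x3 = physical ELR,
	 *   x4 = PAR_EL1,  x5 = vCPU or NULL,  x6 = FAR_EL2,  x7 = HPFAR_EL2
	 */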
	mrs	x0, esr_el2
	mov	x4, x3
	mov	x3, x2
	hyp_pa	x3, x6
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

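/*
 * HVC from the host. In protected mode every HVC is treated as a hypercall
 * and handled via __host_exit; otherwise stub hypercalls (x0 below
 * HVC_STUB_HCALL_NR) are forwarded to __kvm_handle_stub_hvc at its idmap
 * address, and anything else falls through to __host_exit.
 */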
SYM_FUNC_START(__host_hvc)
	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* No stub for you, sonny Jim */
alternative_if ARM64_KVM_PROTECTED_MODE
	b	__host_exit
alternative_else_nop_endif

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there.
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	adr_l	x5, __kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
SYM_FUNC_END(__host_hvc)

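/*
 * Vector entry for synchronous exceptions from the host: HVCs are routed to
 * __host_hvc, everything else to __host_exit for handle_trap() to sort out.
 */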
.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.eq	__host_hvc
	b	__host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

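/*
 * Exceptions taken from EL2 itself are never expected: check for a hyp stack
 * overflow, panic out of a loaded guest if there is one, otherwise panic in
 * hyp.
 */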
.macro invalid_host_el2_vect
	.align 7

	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
	 * of SP should always be 1.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp

	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic

.L__hyp_sp_overflow\@:
	/*
	 * Reset SP to the top of the stack, to allow handling the hyp_panic.
	 * This corrupts the stack but is ok, since we won't be attempting
	 * any unwinding here.
	 */
	ldr_this_cpu	x0, kvm_init_params + NVHE_INIT_STACK_HYP_VA, x1
	mov	sp, x0

	b	hyp_panic_bad_stack
	ASM_BUG()
.endm

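/*
 * Asynchronous exceptions (IRQ/FIQ/SError) from the host are not handled by
 * hyp: panic straight back into the host without restoring its context.
 */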
.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
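/* VBAR_EL2 requires 2KB alignment; each vector entry below is 128 bytes. */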
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
	invalid_host_el1_vect			// Error 64-bit EL1/EL0

	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
	invalid_host_el1_vect			// Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0

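	/* SMCCC 1.2 allows any of x0-x17 to carry results; store them all back. */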
	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)