xref: /linux/arch/arm64/kvm/hyp/nvhe/host.S (revision a1c3be890440a1769ed6f822376a3e3ab0d42994)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text

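/*
 * __host_exit is entered from host_el1_sync_vect below, with the host's
 * x0/x1 already pushed on the hyp stack. It saves the host's general
 * purpose registers into the per-CPU host kvm_cpu_context, hands that
 * context to the C trap handler, and finally restores the registers and
 * ERETs back to the host.
 */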
SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

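	/*
	 * handle_trap is the C-side dispatcher for host exits (expected to
	 * be defined in hyp-main.c, taking the host context pointer that is
	 * currently in x0). It may clobber any caller-saved register, so
	 * only the callee-saved x29 copy of the context survives the call.
	 */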
	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
	bl	handle_trap

	/* Restore host regs x0-x17 */
__host_enter_restore_full:
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-x7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 * 				  u64 elr, u64 par);
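 *
 * Expected to be reached from the C hyp_panic() path with a valid host
 * context pointer, or from invalid_host_el1_vect below with
 * host_ctxt == NULL, in which case no host registers are restored before
 * the ERET (see __host_enter_without_restoring).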
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0

	/* Load the format string into x0 */
	ldr	x0, =__hyp_panic_string
	hyp_kimg_va x0, x6

	/* Load the format arguments into x1-7. */
	mov	x6, x3
	get_vcpu_ptr x7, x3
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
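
	/*
	 * The host's panic() is therefore entered as if it had been called
	 * as panic(__hyp_panic_string, spsr, elr, esr, far, hpfar, par,
	 * vcpu): x1 (spsr) and x2 (elr) were passed in by the caller, x6
	 * holds par and x7 the currently loaded vcpu pointer.
	 */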

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	b.ne	__host_exit

	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm
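
/*
 * Note on the host HVC ABI handled above: a value below HVC_STUB_HCALL_NR
 * in x0 selects one of the hyp stub calls (used, for example, by kexec to
 * reset EL2) and is redirected to __kvm_handle_stub_hvc in the idmap.
 * Anything else, such as the __KVM_HOST_SMCCC_FUNC_* hypercalls issued via
 * kvm_call_hyp_nvhe(), is saved by __host_exit and dispatched by
 * handle_trap.
 */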

.macro invalid_host_el2_vect
	.align 7
	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic
.endm

.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* host_ctxt = NULL: don't restore the host */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
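
/*
 * The vector base must be 2kB aligned (hence .align 11) and each of the 16
 * entries is 0x80 bytes, which is why host_el1_sync_vect checks that it fits
 * within a single vector slot.
 */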
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1
	invalid_host_el1_vect			// IRQ 64-bit EL1
	invalid_host_el1_vect			// FIQ 64-bit EL1
	invalid_host_el1_vect			// Error 64-bit EL1

	invalid_host_el1_vect			// Synchronous 32-bit EL1
	invalid_host_el1_vect			// IRQ 32-bit EL1
	invalid_host_el1_vect			// FIQ 32-bit EL1
	invalid_host_el1_vect			// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
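 * In SMCCC 1.2 and earlier, all SMC64 arguments and return values fit in
 * x0-x17, which is exactly the register range saved and restored around the
 * SMC below.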
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0

	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)
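
/*
 * A C-side caller (expected to live in hyp-main.c) would use this roughly as
 * follows, forwarding the host's SMC to EL3 and letting the results land back
 * in host_ctxt->regs; the function name is illustrative, not part of this
 * file:
 *
 *	static void forward_host_smc(struct kvm_cpu_context *host_ctxt)
 *	{
 *		__kvm_hyp_host_forward_smc(host_ctxt);
 *	}
 */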