/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2017 Alexandru Elisei <alexandru.elisei@gmail.com>
 * Copyright (c) 2021 Andrew Turner
 *
 * This software was developed by Alexandru Elisei under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#include <sys/elf_common.h>
#include <machine/asm.h>
#include <machine/hypervisor.h>

#include "assym.inc"
#include "hyp.h"

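/*
 * Save and restore the host's general-purpose registers on the EL2
 * stack when switching to and from a guest.
 */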
.macro	save_host_registers
	/* TODO: Only store callee saved registers */
	sub	sp, sp, #(32 * 8)
	str	x30,      [sp, #(30 * 8)]
	stp	x28, x29, [sp, #(28 * 8)]
	stp	x26, x27, [sp, #(26 * 8)]
	stp	x24, x25, [sp, #(24 * 8)]
	stp	x22, x23, [sp, #(22 * 8)]
	stp	x20, x21, [sp, #(20 * 8)]
	stp	x18, x19, [sp, #(18 * 8)]
	stp	x16, x17, [sp, #(16 * 8)]
	stp	x14, x15, [sp, #(14 * 8)]
	stp	x12, x13, [sp, #(12 * 8)]
	stp	x10, x11, [sp, #(10 * 8)]
	stp	x8,  x9,  [sp, #(8  * 8)]
	stp	x6,  x7,  [sp, #(6  * 8)]
	stp	x4,  x5,  [sp, #(4  * 8)]
	stp	x2,  x3,  [sp, #(2  * 8)]
	stp	x0,  x1,  [sp, #(0  * 8)]
.endm

.macro	restore_host_registers
	/* TODO: Only restore callee saved registers */
	ldp	x0,  x1,  [sp, #(0  * 8)]
	ldp	x2,  x3,  [sp, #(2  * 8)]
	ldp	x4,  x5,  [sp, #(4  * 8)]
	ldp	x6,  x7,  [sp, #(6  * 8)]
	ldp	x8,  x9,  [sp, #(8  * 8)]
	ldp	x10, x11, [sp, #(10 * 8)]
	ldp	x12, x13, [sp, #(12 * 8)]
	ldp	x14, x15, [sp, #(14 * 8)]
	ldp	x16, x17, [sp, #(16 * 8)]
	ldp	x18, x19, [sp, #(18 * 8)]
	ldp	x20, x21, [sp, #(20 * 8)]
	ldp	x22, x23, [sp, #(22 * 8)]
	ldp	x24, x25, [sp, #(24 * 8)]
	ldp	x26, x27, [sp, #(26 * 8)]
	ldp	x28, x29, [sp, #(28 * 8)]
	ldr	x30,      [sp, #(30 * 8)]
	add	sp, sp, #(32 * 8)
.endm

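/*
 * Save and restore the guest's general-purpose registers in the
 * trapframe embedded in struct hypctx; the hypctx pointer is kept in
 * tpidr_el2 while the guest runs.
 */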
.macro	save_guest_registers
	/* Back up x0 & x1 so we can use x0 as a temporary register */
	stp	x0,  x1,  [sp, #-(2 * 8)]!

	/* Restore the hypctx pointer */
	mrs	x0, tpidr_el2

	stp	x2,  x3,  [x0, #(TF_X + 2  * 8)]
	stp	x4,  x5,  [x0, #(TF_X + 4  * 8)]
	stp	x6,  x7,  [x0, #(TF_X + 6  * 8)]
	stp	x8,  x9,  [x0, #(TF_X + 8  * 8)]
	stp	x10, x11, [x0, #(TF_X + 10 * 8)]
	stp	x12, x13, [x0, #(TF_X + 12 * 8)]
	stp	x14, x15, [x0, #(TF_X + 14 * 8)]
	stp	x16, x17, [x0, #(TF_X + 16 * 8)]
	stp	x18, x19, [x0, #(TF_X + 18 * 8)]
	stp	x20, x21, [x0, #(TF_X + 20 * 8)]
	stp	x22, x23, [x0, #(TF_X + 22 * 8)]
	stp	x24, x25, [x0, #(TF_X + 24 * 8)]
	stp	x26, x27, [x0, #(TF_X + 26 * 8)]
	stp	x28, x29, [x0, #(TF_X + 28 * 8)]

	str	lr, [x0, #(TF_LR)]

	/* Reload the saved guest x0 & x1 and store them in the trapframe */
	ldp	x2,  x3,  [sp], #(2 * 8)
	stp	x2,  x3,  [x0, #(TF_X + 0  * 8)]
.endm

.macro	restore_guest_registers
	/*
	 * Copy the guest x0 and x1 to the stack so we can restore them
	 * after loading the other registers.
	 */
	ldp	x2,  x3,  [x0, #(TF_X + 0  * 8)]
	stp	x2,  x3,  [sp, #-(2 * 8)]!

	ldr	lr, [x0, #(TF_LR)]

	ldp	x28, x29, [x0, #(TF_X + 28 * 8)]
	ldp	x26, x27, [x0, #(TF_X + 26 * 8)]
	ldp	x24, x25, [x0, #(TF_X + 24 * 8)]
	ldp	x22, x23, [x0, #(TF_X + 22 * 8)]
	ldp	x20, x21, [x0, #(TF_X + 20 * 8)]
	ldp	x18, x19, [x0, #(TF_X + 18 * 8)]
	ldp	x16, x17, [x0, #(TF_X + 16 * 8)]
	ldp	x14, x15, [x0, #(TF_X + 14 * 8)]
	ldp	x12, x13, [x0, #(TF_X + 12 * 8)]
	ldp	x10, x11, [x0, #(TF_X + 10 * 8)]
	ldp	x8,  x9,  [x0, #(TF_X + 8  * 8)]
	ldp	x6,  x7,  [x0, #(TF_X + 6  * 8)]
	ldp	x4,  x5,  [x0, #(TF_X + 4  * 8)]
	ldp	x2,  x3,  [x0, #(TF_X + 2  * 8)]

	ldp	x0,  x1,  [sp], #(2 * 8)
.endm

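/*
 * Exception vector entries. Each entry in the vector table is 32
 * instructions (128 bytes) long, hence the ".align 7". "vempty" fills
 * a slot that should never be taken with an infinite loop; "vector"
 * branches to the named handler.
 */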
.macro vempty
	.align 7
1:	b	1b
.endm

.macro vector name
	.align 7
	b	handle_\name
.endm
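/*
 * The hypervisor exception vector table. The vector base address
 * register (VBAR_EL2, or VBAR_EL1 under VHE) requires 2KB alignment,
 * hence the ".align 11". 32-bit (AArch32) guests are not supported,
 * so those entries are left empty.
 */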
	.text
	.align 11
hyp_vectors:
	vempty			/* Synchronous EL2t */
	vempty			/* IRQ EL2t */
	vempty			/* FIQ EL2t */
	vempty			/* Error EL2t */

	vector el2_el2h_sync	/* Synchronous EL2h */
	vector el2_el2h_irq	/* IRQ EL2h */
	vector el2_el2h_fiq	/* FIQ EL2h */
	vector el2_el2h_error	/* Error EL2h */

	vector el2_el1_sync64	/* Synchronous 64-bit EL1 */
	vector el2_el1_irq64	/* IRQ 64-bit EL1 */
	vector el2_el1_fiq64	/* FIQ 64-bit EL1 */
	vector el2_el1_error64	/* Error 64-bit EL1 */

	vempty			/* Synchronous 32-bit EL1 */
	vempty			/* IRQ 32-bit EL1 */
	vempty			/* FIQ 32-bit EL1 */
	vempty			/* Error 32-bit EL1 */

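/*
 * Switch from the guest back to the host: save the guest's registers,
 * restore the host's, and point the stage 2 translation back at the
 * host. Under VHE also restore the host's vbar_el1, which
 * do_call_guest stashed in x1 (reloaded by restore_host_registers).
 */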
.macro do_world_switch_to_host
	save_guest_registers
	restore_host_registers

	/* Restore host VTTBR */
	mov	x9, #VTTBR_HOST
	msr	vttbr_el2, x9

#ifdef VMM_VHE
	msr	vbar_el1, x1
#endif
.endm


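/*
 * Handle an exception taken while running at EL2. Without VHE this can
 * happen with either the host's or the guest's vttbr_el2 installed:
 * in the guest case do a world switch and return the exception type to
 * the host; in the host case return the type in x0 directly with an
 * exception return. Under VHE these vectors are only installed while a
 * guest runs, so the world switch is unconditional.
 */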
.macro handle_el2_excp type
#ifndef VMM_VHE
	/* Save x9 so we can use it as a temporary register */
	str	x9, [sp, #-16]!

	/* Test if the exception happened when the host was running */
	mrs	x9, vttbr_el2
	cmp	x9, #VTTBR_HOST
	beq	1f

	/* We got the exception while the guest was running */
	ldr	x9, [sp], #16
#endif /* !VMM_VHE */
	do_world_switch_to_host
	mov	x0, \type
	ret

#ifndef VMM_VHE
1:
	/* We got the exception while the host was running */
	ldr	x9, [sp], #16
	mov	x0, \type
	ERET
#endif /* !VMM_VHE */
.endm


LENTRY(handle_el2_el2h_sync)
	handle_el2_excp #EXCP_TYPE_EL2_SYNC
LEND(handle_el2_el2h_sync)

LENTRY(handle_el2_el2h_irq)
	handle_el2_excp #EXCP_TYPE_EL2_IRQ
LEND(handle_el2_el2h_irq)

LENTRY(handle_el2_el2h_fiq)
	handle_el2_excp #EXCP_TYPE_EL2_FIQ
LEND(handle_el2_el2h_fiq)

LENTRY(handle_el2_el2h_error)
	handle_el2_excp #EXCP_TYPE_EL2_ERROR
LEND(handle_el2_el2h_error)


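/*
 * A synchronous exception from 64-bit EL1. Without VHE this is also
 * how the host kernel calls into the hypervisor (via hvc); otherwise
 * it is a guest exit that needs a world switch back to the host.
 */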
LENTRY(handle_el2_el1_sync64)
#ifndef VMM_VHE
	/* Save x9 so we can use it as a temporary register */
	str	x9, [sp, #-16]!

	/* Check for a host hypervisor call */
	mrs	x9, vttbr_el2
	cmp	x9, #VTTBR_HOST
	ldr	x9, [sp], #16 /* Restore the temp register */
	bne	1f

	/*
	 * Called from the host
	 */

	/* If this is a cleanup call, handle it in a controlled state */
	cmp	x0, #(HYP_CLEANUP)
	b.eq	vmm_cleanup

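	/*
	 * A regular hypercall: the arguments are still in x0-x7, so
	 * call vmm_hyp_enter() directly and return its result to the
	 * host with an exception return.
	 */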
	str	lr, [sp, #-16]!
	bl	vmm_hyp_enter
	ldr	lr, [sp], #16
	ERET

1:
#endif
	/* Guest exception taken to EL2 */
	do_world_switch_to_host
	mov	x0, #EXCP_TYPE_EL1_SYNC
	ret
LEND(handle_el2_el1_sync64)

/*
 * We only trap IRQ, FIQ and SError exceptions when a guest is running. Do a
 * world switch to the host to handle these exceptions.
 */

LENTRY(handle_el2_el1_irq64)
	do_world_switch_to_host
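	/*
	 * A non-zero ICH_MISR_EL2 means a maintenance interrupt from
	 * the GIC virtual interface is pending; report it as such so
	 * the host can service the virtual GIC.
	 */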
	str	x9, [sp, #-16]!
	mrs	x9, ich_misr_el2
	cmp	x9, xzr
	beq	1f
	mov	x0, #EXCP_TYPE_MAINT_IRQ
	b	2f
1:
	mov	x0, #EXCP_TYPE_EL1_IRQ
2:
	ldr	x9, [sp], #16
	ret
LEND(handle_el2_el1_irq64)

LENTRY(handle_el2_el1_fiq64)
	do_world_switch_to_host
	mov	x0, #EXCP_TYPE_EL1_FIQ
	ret
LEND(handle_el2_el1_fiq64)

LENTRY(handle_el2_el1_error64)
	do_world_switch_to_host
	mov	x0, #EXCP_TYPE_EL1_ERROR
	ret
LEND(handle_el2_el1_error64)


/*
 * Usage:
 * uint64_t vmm_do_call_guest(struct hypctx *hypctx)
 *
 * Expecting:
 * x0 - hypctx address
 */
ENTRY(VMM_HYP_FUNC(do_call_guest))
#ifdef VMM_VHE
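	/*
	 * Under VHE the host owns vbar_el1; stash the host's copy in
	 * x1 (saved onto the stack by save_host_registers below) and
	 * install the hypervisor vectors while the guest runs.
	 */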
	mrs	x1, vbar_el1
	adrp	x2, hyp_vectors
	add	x2, x2, :lo12:hyp_vectors
	msr	vbar_el1, x2
	isb
#endif

	/* Save hypctx address */
	msr	tpidr_el2, x0

	save_host_registers
	restore_guest_registers

	/* Enter guest */
	ERET
END(VMM_HYP_FUNC(do_call_guest))

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
