/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2017 Alexandru Elisei <alexandru.elisei@gmail.com>
 * Copyright (c) 2021 Andrew Turner
 *
 * This software was developed by Alexandru Elisei under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#include <machine/asm.h>
#include <machine/hypervisor.h>

#include "assym.inc"
#include "hyp.h"

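/*
 * The host context is kept on the EL2 stack in a 32-slot, 8-byte-per-slot
 * frame: x0-x30 live at their register-number offsets and slot 31 is left
 * unused so the stack pointer stays 16-byte aligned.
 */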
.macro	save_host_registers
	/* TODO: Only store callee saved registers */
	sub	sp, sp, #(32 * 8)
	str	x30,      [sp, #(30 * 8)]
	stp	x28, x29, [sp, #(28 * 8)]
	stp	x26, x27, [sp, #(26 * 8)]
	stp	x24, x25, [sp, #(24 * 8)]
	stp	x22, x23, [sp, #(22 * 8)]
	stp	x20, x21, [sp, #(20 * 8)]
	stp	x18, x19, [sp, #(18 * 8)]
	stp	x16, x17, [sp, #(16 * 8)]
	stp	x14, x15, [sp, #(14 * 8)]
	stp	x12, x13, [sp, #(12 * 8)]
	stp	x8,  x9,  [sp, #(8  * 8)]
	stp	x10, x11, [sp, #(10 * 8)]
	stp	x6,  x7,  [sp, #(6  * 8)]
	stp	x4,  x5,  [sp, #(4  * 8)]
	stp	x2,  x3,  [sp, #(2  * 8)]
	stp	x0,  x1,  [sp, #(0  * 8)]
.endm

.macro	restore_host_registers
	/* TODO: Only restore callee saved registers */
	ldp	x0,  x1,  [sp, #(0  * 8)]
	ldp	x2,  x3,  [sp, #(2  * 8)]
	ldp	x4,  x5,  [sp, #(4  * 8)]
	ldp	x6,  x7,  [sp, #(6  * 8)]
	ldp	x8,  x9,  [sp, #(8  * 8)]
	ldp	x10, x11, [sp, #(10 * 8)]
	ldp	x12, x13, [sp, #(12 * 8)]
	ldp	x14, x15, [sp, #(14 * 8)]
	ldp	x16, x17, [sp, #(16 * 8)]
	ldp	x18, x19, [sp, #(18 * 8)]
	ldp	x20, x21, [sp, #(20 * 8)]
	ldp	x22, x23, [sp, #(22 * 8)]
	ldp	x24, x25, [sp, #(24 * 8)]
	ldp	x26, x27, [sp, #(26 * 8)]
	ldp	x28, x29, [sp, #(28 * 8)]
	ldr	x30,      [sp, #(30 * 8)]
	add	sp, sp, #(32 * 8)
.endm

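/*
 * The guest context lives in the trapframe embedded in struct hypctx;
 * tpidr_el2 holds the hypctx pointer while a guest runs. TF_X and TF_LR
 * are trapframe offsets generated into assym.inc and are applied directly
 * to the hypctx pointer, which assumes the trapframe sits at the start of
 * the structure.
 */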
.macro	save_guest_registers
	/* Back up x0 & x1 so we can use x0 as a temporary register */
	stp	x0,  x1,  [sp, #-(2 * 8)]!

	/* Load the hypctx pointer */
	mrs	x0, tpidr_el2

	stp	x2,  x3,  [x0, #(TF_X + 2  * 8)]
	stp	x4,  x5,  [x0, #(TF_X + 4  * 8)]
	stp	x6,  x7,  [x0, #(TF_X + 6  * 8)]
	stp	x8,  x9,  [x0, #(TF_X + 8  * 8)]
	stp	x10, x11, [x0, #(TF_X + 10 * 8)]
	stp	x12, x13, [x0, #(TF_X + 12 * 8)]
	stp	x14, x15, [x0, #(TF_X + 14 * 8)]
	stp	x16, x17, [x0, #(TF_X + 16 * 8)]
	stp	x18, x19, [x0, #(TF_X + 18 * 8)]
	stp	x20, x21, [x0, #(TF_X + 20 * 8)]
	stp	x22, x23, [x0, #(TF_X + 22 * 8)]
	stp	x24, x25, [x0, #(TF_X + 24 * 8)]
	stp	x26, x27, [x0, #(TF_X + 26 * 8)]
	stp	x28, x29, [x0, #(TF_X + 28 * 8)]

	str	lr, [x0, #(TF_LR)]

	/* Pop the saved x0 & x1 and store them in the trapframe */
	ldp	x2,  x3,  [sp], #(2 * 8)
	stp	x2,  x3,  [x0, #(TF_X + 0  * 8)]
.endm

.macro	restore_guest_registers
	/*
	 * Copy the guest x0 and x1 to the stack so we can restore them
	 * after loading the other registers.
	 */
	ldp	x2,  x3,  [x0, #(TF_X + 0  * 8)]
	stp	x2,  x3,  [sp, #-(2 * 8)]!

	ldr	lr, [x0, #(TF_LR)]

	ldp	x28, x29, [x0, #(TF_X + 28 * 8)]
	ldp	x26, x27, [x0, #(TF_X + 26 * 8)]
	ldp	x24, x25, [x0, #(TF_X + 24 * 8)]
	ldp	x22, x23, [x0, #(TF_X + 22 * 8)]
	ldp	x20, x21, [x0, #(TF_X + 20 * 8)]
	ldp	x18, x19, [x0, #(TF_X + 18 * 8)]
	ldp	x16, x17, [x0, #(TF_X + 16 * 8)]
	ldp	x14, x15, [x0, #(TF_X + 14 * 8)]
	ldp	x12, x13, [x0, #(TF_X + 12 * 8)]
	ldp	x10, x11, [x0, #(TF_X + 10 * 8)]
	ldp	x8,  x9,  [x0, #(TF_X + 8  * 8)]
	ldp	x6,  x7,  [x0, #(TF_X + 6  * 8)]
	ldp	x4,  x5,  [x0, #(TF_X + 4  * 8)]
	ldp	x2,  x3,  [x0, #(TF_X + 2  * 8)]

	ldp	x0,  x1,  [sp], #(2 * 8)
.endm

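/*
 * Each vector table entry is at most 32 instructions (128 bytes, hence
 * .align 7). vempty parks unexpected exception sources in a tight loop;
 * vector emits a branch to the named handler.
 */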
.macro vempty
	.align 7
	1: b	1b
.endm

.macro vector name
	.align 7
	b	handle_\name
.endm

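/*
 * VBAR_EL2 requires a 2KB-aligned (.align 11) table of 16 entries. The
 * install-time table below handles only the "Synchronous 64-bit EL1"
 * entry, the hypervisor-call path the host kernel uses to reach
 * handle_hyp_init; everything else is unexpected at that stage.
 */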
	.section ".vmm_vectors","ax"
	.align 11
hyp_init_vectors:
	vempty		/* Synchronous EL2t */
	vempty		/* IRQ EL2t */
	vempty		/* FIQ EL2t */
	vempty		/* Error EL2t */

	vempty		/* Synchronous EL2h */
	vempty		/* IRQ EL2h */
	vempty		/* FIQ EL2h */
	vempty		/* Error EL2h */

	vector hyp_init	/* Synchronous 64-bit EL1 */
	vempty		/* IRQ 64-bit EL1 */
	vempty		/* FIQ 64-bit EL1 */
	vempty		/* Error 64-bit EL1 */

	vempty		/* Synchronous 32-bit EL1 */
	vempty		/* IRQ 32-bit EL1 */
	vempty		/* FIQ 32-bit EL1 */
	vempty		/* Error 32-bit EL1 */

	.text
	.align 11
hyp_vectors:
	vempty			/* Synchronous EL2t */
	vempty			/* IRQ EL2t */
	vempty			/* FIQ EL2t */
	vempty			/* Error EL2t */

	vector el2_el2h_sync	/* Synchronous EL2h */
	vector el2_el2h_irq	/* IRQ EL2h */
	vector el2_el2h_fiq	/* FIQ EL2h */
	vector el2_el2h_error	/* Error EL2h */

	vector el2_el1_sync64	/* Synchronous 64-bit EL1 */
	vector el2_el1_irq64	/* IRQ 64-bit EL1 */
	vector el2_el1_fiq64	/* FIQ 64-bit EL1 */
	vector el2_el1_error64	/* Error 64-bit EL1 */

	vempty			/* Synchronous 32-bit EL1 */
	vempty			/* IRQ 32-bit EL1 */
	vempty			/* FIQ 32-bit EL1 */
	vempty			/* Error 32-bit EL1 */

/*
 * Initialize the hypervisor mode with a new exception vector table,
 * translation tables and stack.
 *
 * Expecting:
 * x0 - translation tables physical address
 * x1 - stack top virtual address
 * x2 - TCR_EL2 value
 * x3 - SCTLR_EL2 value
 * x4 - VTCR_EL2 value
 */
LENTRY(handle_hyp_init)
	/* Install the new exception vectors */
	adrp	x6, hyp_vectors
	add	x6, x6, :lo12:hyp_vectors
	msr	vbar_el2, x6
	/* Set the stack top address */
	mov	sp, x1
	/* Use the host VTTBR_EL2 to tell the host and the guests apart */
	mov	x9, #VTTBR_HOST
	msr	vttbr_el2, x9
	/* Load the base address for the translation tables */
	msr	ttbr0_el2, x0
	/* Invalidate the TLB */
	dsb	ish
	tlbi	alle2
	dsb	ish
	isb
	/* Use the same memory attributes as EL1 */
	mrs	x9, mair_el1
	msr	mair_el2, x9
	/* Configure address translation */
	msr	tcr_el2, x2
	isb
	/* Set the system control register for EL2 */
	msr	sctlr_el2, x3
	/* Set the Stage 2 translation control register */
	msr	vtcr_el2, x4
	/* Return success */
	mov	x0, #0
	/* MMU is up and running */
	ERET
LEND(handle_hyp_init)
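
/*
 * The kernel runs at EL1, so the init call arrives here through the
 * "Synchronous 64-bit EL1" entry of hyp_init_vectors above. A sketch of
 * the host-side call, assuming the vmm_call_hyp() HVC trampoline and the
 * register contract listed above (argument names are illustrative only):
 *
 *	(void)vmm_call_hyp(el2_pt_pa, stack_top, tcr_el2, sctlr_el2,
 *	    vtcr_el2);
 */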

.macro do_world_switch_to_host
	save_guest_registers
	restore_host_registers

	/* Restore host VTTBR */
	mov	x9, #VTTBR_HOST
	msr	vttbr_el2, x9
.endm
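
/*
 * After do_world_switch_to_host the EL2 stack holds the host context
 * again, including the x30 saved by vmm_enter_guest, so the "ret" in the
 * handlers below returns to vmm_enter_guest's caller with the exception
 * type in x0.
 */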


.macro handle_el2_excp type
	/* Save registers before modifying so we can restore them */
	str	x9, [sp, #-16]!

	/* Test if the exception happened when the host was running */
	mrs	x9, vttbr_el2
	cmp	x9, #VTTBR_HOST
	b.eq	1f

	/* We got the exception while the guest was running */
	ldr	x9, [sp], #16
	do_world_switch_to_host
	mov	x0, \type
	ret

1:
	/* We got the exception while the host was running */
	ldr	x9, [sp], #16
	mov	x0, \type
	ERET
.endm
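
/*
 * In the guest case the world switch makes the "ret" return from
 * vmm_enter_guest; in the host case the ERET resumes the EL2 context in
 * ELR_EL2 with the exception type left in x0.
 */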


LENTRY(handle_el2_el2h_sync)
	handle_el2_excp #EXCP_TYPE_EL2_SYNC
LEND(handle_el2_el2h_sync)

LENTRY(handle_el2_el2h_irq)
	handle_el2_excp #EXCP_TYPE_EL2_IRQ
LEND(handle_el2_el2h_irq)

LENTRY(handle_el2_el2h_fiq)
	handle_el2_excp #EXCP_TYPE_EL2_FIQ
LEND(handle_el2_el2h_fiq)

LENTRY(handle_el2_el2h_error)
	handle_el2_excp #EXCP_TYPE_EL2_ERROR
LEND(handle_el2_el2h_error)


LENTRY(handle_el2_el1_sync64)
	/* Save registers before modifying so we can restore them */
	str	x9, [sp, #-16]!

	/* Check for host hypervisor call */
	mrs	x9, vttbr_el2
	cmp	x9, #VTTBR_HOST
	ldr	x9, [sp], #16 /* Restore the temp register */
	b.ne	1f

	/*
	 * Called from the host
	 */

	/* Check if this is a cleanup call and handle in a controlled state */
	cmp	x0, #(HYP_CLEANUP)
	b.eq	vmm_cleanup

	str	lr, [sp, #-16]!
	bl	vmm_hyp_enter
	ldr	lr, [sp], #16
	ERET

1:	/* Guest exception taken to EL2 */
	do_world_switch_to_host
	mov	x0, #EXCP_TYPE_EL1_SYNC
	ret
LEND(handle_el2_el1_sync64)
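
/*
 * vmm_hyp_enter is the C entry point for host hypervisor calls: x0 holds
 * the call number (HYP_CLEANUP is filtered out above) and the remaining
 * arguments are left untouched in x1-x7 by this handler.
 */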

/*
 * We only trap IRQ, FIQ and SError exceptions when a guest is running. Do a
 * world switch to host to handle these exceptions.
 */

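/*
 * ICH_MISR_EL2 is the GIC virtual-interface maintenance interrupt status
 * register; a non-zero value means the pending IRQ is a maintenance
 * interrupt for the virtual CPU interface rather than an ordinary
 * physical interrupt.
 */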
LENTRY(handle_el2_el1_irq64)
	do_world_switch_to_host
	str	x9, [sp, #-16]!
	mrs	x9, ich_misr_el2
	cmp	x9, xzr
	b.eq	1f
	mov	x0, #EXCP_TYPE_MAINT_IRQ
	b	2f
1:
	mov	x0, #EXCP_TYPE_EL1_IRQ
2:
	ldr	x9, [sp], #16
	ret
LEND(handle_el2_el1_irq64)

LENTRY(handle_el2_el1_fiq64)
	do_world_switch_to_host
	mov	x0, #EXCP_TYPE_EL1_FIQ
	ret
LEND(handle_el2_el1_fiq64)

LENTRY(handle_el2_el1_error64)
	do_world_switch_to_host
	mov	x0, #EXCP_TYPE_EL1_ERROR
	ret
LEND(handle_el2_el1_error64)


/*
 * Usage:
 * uint64_t vmm_enter_guest(struct hypctx *hypctx)
 *
 * Expecting:
 * x0 - hypctx address
 */
ENTRY(vmm_enter_guest)
	/* Save hypctx address */
	msr	tpidr_el2, x0

	save_host_registers
	restore_guest_registers

	/* Enter guest */
	ERET
END(vmm_enter_guest)
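
/*
 * tpidr_el2 keeps the hypctx pointer for the whole guest run so
 * save_guest_registers can find the trapframe after a trap; the ERET
 * above only "returns" when one of the EL1 exception handlers above
 * world-switches back and hands the EXCP_TYPE_* code (from hyp.h) to the
 * caller as this function's result.
 */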

/*
 * Usage:
 * void vmm_cleanup(uint64_t handle, void *hyp_stub_vectors)
 *
 * Expecting:
 * x1 - physical address of hyp_stub_vectors
 */
LENTRY(vmm_cleanup)
	/* Restore the stub vectors */
	msr	vbar_el2, x1

	/* Disable the MMU */
	dsb	sy
	mrs	x2, sctlr_el2
	bic	x2, x2, #SCTLR_EL2_M
	msr	sctlr_el2, x2
	isb

	ERET
LEND(vmm_cleanup)
