/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2017 Alexandru Elisei <alexandru.elisei@gmail.com>
 * Copyright (c) 2021 Andrew Turner
 *
 * This software was developed by Alexandru Elisei under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#include <machine/asm.h>
#include <machine/hypervisor.h>

#include "assym.inc"
#include "hyp.h"

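/*
 * The host frame is 32 * 8 bytes. Only x0-x30 are stored; the final
 * 8-byte slot is unused padding that keeps the stack 16-byte aligned.
 */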
.macro	save_host_registers
	/* TODO: Only store callee-saved registers */
	sub	sp, sp, #(32 * 8)
	str	x30,      [sp, #(30 * 8)]
	stp	x28, x29, [sp, #(28 * 8)]
	stp	x26, x27, [sp, #(26 * 8)]
	stp	x24, x25, [sp, #(24 * 8)]
	stp	x22, x23, [sp, #(22 * 8)]
	stp	x20, x21, [sp, #(20 * 8)]
	stp	x18, x19, [sp, #(18 * 8)]
	stp	x16, x17, [sp, #(16 * 8)]
	stp	x14, x15, [sp, #(14 * 8)]
	stp	x12, x13, [sp, #(12 * 8)]
	stp	x10, x11, [sp, #(10 * 8)]
	stp	x8,  x9,  [sp, #(8  * 8)]
	stp	x6,  x7,  [sp, #(6  * 8)]
	stp	x4,  x5,  [sp, #(4  * 8)]
	stp	x2,  x3,  [sp, #(2  * 8)]
	stp	x0,  x1,  [sp, #(0  * 8)]
.endm

.macro	restore_host_registers
	/* TODO: Only restore callee-saved registers */
	ldp	x0,  x1,  [sp, #(0  * 8)]
	ldp	x2,  x3,  [sp, #(2  * 8)]
	ldp	x4,  x5,  [sp, #(4  * 8)]
	ldp	x6,  x7,  [sp, #(6  * 8)]
	ldp	x8,  x9,  [sp, #(8  * 8)]
	ldp	x10, x11, [sp, #(10 * 8)]
	ldp	x12, x13, [sp, #(12 * 8)]
	ldp	x14, x15, [sp, #(14 * 8)]
	ldp	x16, x17, [sp, #(16 * 8)]
	ldp	x18, x19, [sp, #(18 * 8)]
	ldp	x20, x21, [sp, #(20 * 8)]
	ldp	x22, x23, [sp, #(22 * 8)]
	ldp	x24, x25, [sp, #(24 * 8)]
	ldp	x26, x27, [sp, #(26 * 8)]
	ldp	x28, x29, [sp, #(28 * 8)]
	ldr	x30,      [sp, #(30 * 8)]
	add	sp, sp, #(32 * 8)
.endm

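/*
 * The guest's registers live in the trap frame embedded in the hypctx
 * structure; the TF_* offsets come from assym.inc. The hypctx pointer
 * itself is kept in tpidr_el2 by vmm_enter_guest.
 */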
.macro	save_guest_registers
	/* Back up x0 and x1 so we can use them as temporary registers */
	stp	x0,  x1,  [sp, #-(2 * 8)]!

	/* Load the hypctx pointer */
	mrs	x0, tpidr_el2

	stp	x2,  x3,  [x0, #(TF_X + 2  * 8)]
	stp	x4,  x5,  [x0, #(TF_X + 4  * 8)]
	stp	x6,  x7,  [x0, #(TF_X + 6  * 8)]
	stp	x8,  x9,  [x0, #(TF_X + 8  * 8)]
	stp	x10, x11, [x0, #(TF_X + 10 * 8)]
	stp	x12, x13, [x0, #(TF_X + 12 * 8)]
	stp	x14, x15, [x0, #(TF_X + 14 * 8)]
	stp	x16, x17, [x0, #(TF_X + 16 * 8)]
	stp	x18, x19, [x0, #(TF_X + 18 * 8)]
	stp	x20, x21, [x0, #(TF_X + 20 * 8)]
	stp	x22, x23, [x0, #(TF_X + 22 * 8)]
	stp	x24, x25, [x0, #(TF_X + 24 * 8)]
	stp	x26, x27, [x0, #(TF_X + 26 * 8)]
	stp	x28, x29, [x0, #(TF_X + 28 * 8)]

	str	lr, [x0, #(TF_LR)]

	/* Pop the saved x0 & x1 and store them in the trap frame */
	ldp	x2,  x3,  [sp], #(2 * 8)
	stp	x2,  x3,  [x0, #(TF_X + 0  * 8)]
.endm

.macro	restore_guest_registers
	/*
	 * Copy the guest x0 and x1 to the stack so we can restore them
	 * after loading the other registers.
	 */
	ldp	x2,  x3,  [x0, #(TF_X + 0  * 8)]
	stp	x2,  x3,  [sp, #-(2 * 8)]!

	ldr	lr, [x0, #(TF_LR)]

	ldp	x28, x29, [x0, #(TF_X + 28 * 8)]
	ldp	x26, x27, [x0, #(TF_X + 26 * 8)]
	ldp	x24, x25, [x0, #(TF_X + 24 * 8)]
	ldp	x22, x23, [x0, #(TF_X + 22 * 8)]
	ldp	x20, x21, [x0, #(TF_X + 20 * 8)]
	ldp	x18, x19, [x0, #(TF_X + 18 * 8)]
	ldp	x16, x17, [x0, #(TF_X + 16 * 8)]
	ldp	x14, x15, [x0, #(TF_X + 14 * 8)]
	ldp	x12, x13, [x0, #(TF_X + 12 * 8)]
	ldp	x10, x11, [x0, #(TF_X + 10 * 8)]
	ldp	x8,  x9,  [x0, #(TF_X + 8  * 8)]
	ldp	x6,  x7,  [x0, #(TF_X + 6  * 8)]
	ldp	x4,  x5,  [x0, #(TF_X + 4  * 8)]
	ldp	x2,  x3,  [x0, #(TF_X + 2  * 8)]

	ldp	x0,  x1,  [sp], #(2 * 8)
.endm

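/*
 * Each slot in an exception vector table is 128 bytes, hence the
 * .align 7 below. vempty parks unexpected exceptions in an infinite
 * loop; vector branches to the named handler.
 */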
.macro vempty
	.align 7
	1: b	1b
.endm

.macro vector name
	.align 7
	b	handle_\name
.endm

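/*
 * The initial vector table only handles the synchronous 64-bit EL1
 * vector. The host points VBAR_EL2 at this table and enters EL2 with
 * an exception from EL1 (e.g. an HVC), landing in handle_hyp_init,
 * which then installs the full hyp_vectors table below.
 */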
	.section ".vmm_vectors","ax"
	.align 11
hyp_init_vectors:
	vempty		/* Synchronous EL2t */
	vempty		/* IRQ EL2t */
	vempty		/* FIQ EL2t */
	vempty		/* Error EL2t */

	vempty		/* Synchronous EL2h */
	vempty		/* IRQ EL2h */
	vempty		/* FIQ EL2h */
	vempty		/* Error EL2h */

	vector hyp_init	/* Synchronous 64-bit EL1 */
	vempty		/* IRQ 64-bit EL1 */
	vempty		/* FIQ 64-bit EL1 */
	vempty		/* Error 64-bit EL1 */

	vempty		/* Synchronous 32-bit EL1 */
	vempty		/* IRQ 32-bit EL1 */
	vempty		/* FIQ 32-bit EL1 */
	vempty		/* Error 32-bit EL1 */

	.text
	.align 11
hyp_vectors:
	vempty			/* Synchronous EL2t */
	vempty			/* IRQ EL2t */
	vempty			/* FIQ EL2t */
	vempty			/* Error EL2t */

	vector el2_el2h_sync	/* Synchronous EL2h */
	vector el2_el2h_irq	/* IRQ EL2h */
	vector el2_el2h_fiq	/* FIQ EL2h */
	vector el2_el2h_error	/* Error EL2h */

	vector el2_el1_sync64	/* Synchronous 64-bit EL1 */
	vector el2_el1_irq64	/* IRQ 64-bit EL1 */
	vector el2_el1_fiq64	/* FIQ 64-bit EL1 */
	vector el2_el1_error64	/* Error 64-bit EL1 */

	vempty			/* Synchronous 32-bit EL1 */
	vempty			/* IRQ 32-bit EL1 */
	vempty			/* FIQ 32-bit EL1 */
	vempty			/* Error 32-bit EL1 */

/*
 * Initialize the hypervisor mode with a new exception vector table,
 * translation tables and stack.
 *
 * Expecting:
 * x0 - translation tables physical address
 * x1 - stack top virtual address
 * x2 - TCR_EL2 value
 * x3 - SCTLR_EL2 value
 * x4 - VTCR_EL2 value
 */
LENTRY(handle_hyp_init)
	/* Install the new exception vectors */
	adrp	x6, hyp_vectors
	add	x6, x6, :lo12:hyp_vectors
	msr	vbar_el2, x6
	/* Set the stack top address */
	mov	sp, x1
	/* Use the host VTTBR_EL2 to tell the host and the guests apart */
	mov	x9, #VTTBR_HOST
	msr	vttbr_el2, x9
	/* Load the base address for the translation tables */
	msr	ttbr0_el2, x0
	/* Invalidate the TLB */
	tlbi	alle2
	/* Use the same memory attributes as EL1 */
	mrs	x9, mair_el1
	msr	mair_el2, x9
	/* Configure address translation */
	msr	tcr_el2, x2
	isb
	/* Set the system control register for EL2 */
	msr	sctlr_el2, x3
	/* Set the Stage 2 translation control register */
	msr	vtcr_el2, x4
	/* Return success */
	mov	x0, #0
	/* MMU is up and running */
	ERET
LEND(handle_hyp_init)

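/*
 * Switch from the guest to the host: save the guest state into the
 * hypctx trap frame, restore the host frame that vmm_enter_guest
 * pushed, and set VTTBR_EL2 back to the host value so later exceptions
 * are attributed to the host.
 */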
.macro do_world_switch_to_host
	save_guest_registers
	restore_host_registers

	/* Restore host VTTBR */
	mov	x9, #VTTBR_HOST
	msr	vttbr_el2, x9
.endm


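/*
 * Handle an exception taken while running at EL2. VTTBR_EL2 tells us
 * whether the host or a guest was active: for a guest we do a world
 * switch and return the exception type to vmm_enter_guest's caller;
 * for the host we put the type in x0 and ERET back to the interrupted
 * code.
 */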
.macro handle_el2_excp type
	/* Save registers before modifying so we can restore them */
	str	x9, [sp, #-16]!

	/* Test if the exception happened when the host was running */
	mrs	x9, vttbr_el2
	cmp	x9, #VTTBR_HOST
	beq	1f

	/* We got the exception while the guest was running */
	ldr	x9, [sp], #16
	do_world_switch_to_host
	mov	x0, \type
	ret

1:
	/* We got the exception while the host was running */
	ldr	x9, [sp], #16
	mov	x0, \type
	ERET
.endm


LENTRY(handle_el2_el2h_sync)
	handle_el2_excp #EXCP_TYPE_EL2_SYNC
LEND(handle_el2_el2h_sync)

LENTRY(handle_el2_el2h_irq)
	handle_el2_excp #EXCP_TYPE_EL2_IRQ
LEND(handle_el2_el2h_irq)

LENTRY(handle_el2_el2h_fiq)
	handle_el2_excp #EXCP_TYPE_EL2_FIQ
LEND(handle_el2_el2h_fiq)

LENTRY(handle_el2_el2h_error)
	handle_el2_excp #EXCP_TYPE_EL2_ERROR
LEND(handle_el2_el2h_error)


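/*
 * A synchronous exception from 64-bit EL1 is either a hypervisor call
 * from the host (a regular vmm_hyp_enter call or the special
 * HYP_CLEANUP call) or a guest exit that needs a world switch.
 */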
LENTRY(handle_el2_el1_sync64)
	/* Save registers before modifying so we can restore them */
	str	x9, [sp, #-16]!

	/* Check for host hypervisor call */
	mrs	x9, vttbr_el2
	cmp	x9, #VTTBR_HOST
	ldr	x9, [sp], #16 /* Restore the temp register */
	bne	1f

	/*
	 * Called from the host
	 */

	/* Check if this is a cleanup call and handle it in a controlled state */
	cmp	x0, #(HYP_CLEANUP)
	b.eq	vmm_cleanup

	str	lr, [sp, #-16]!
	bl	vmm_hyp_enter
	ldr	lr, [sp], #16
	ERET

1:	/* Guest exception taken to EL2 */
	do_world_switch_to_host
	mov	x0, #EXCP_TYPE_EL1_SYNC
	ret
LEND(handle_el2_el1_sync64)

/*
 * We only trap IRQ, FIQ and SError exceptions when a guest is running.
 * Do a world switch to the host to handle these exceptions.
 */

LENTRY(handle_el2_el1_irq64)
	do_world_switch_to_host
	str	x9, [sp, #-16]!
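	/*
	 * A non-zero ICH_MISR_EL2 means the GIC virtual interface has a
	 * maintenance interrupt pending, so report a maintenance
	 * interrupt rather than a plain guest IRQ.
	 */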
	mrs	x9, ich_misr_el2
	cmp	x9, xzr
	beq	1f
	mov	x0, #EXCP_TYPE_MAINT_IRQ
	b	2f
1:
	mov	x0, #EXCP_TYPE_EL1_IRQ
2:
	ldr	x9, [sp], #16
	ret
LEND(handle_el2_el1_irq64)

LENTRY(handle_el2_el1_fiq64)
	do_world_switch_to_host
	mov	x0, #EXCP_TYPE_EL1_FIQ
	ret
LEND(handle_el2_el1_fiq64)

LENTRY(handle_el2_el1_error64)
	do_world_switch_to_host
	mov	x0, #EXCP_TYPE_EL1_ERROR
	ret
LEND(handle_el2_el1_error64)


/*
 * Usage:
 * uint64_t vmm_enter_guest(struct hypctx *hypctx)
 *
 * Expecting:
 * x0 - hypctx address
 */
ENTRY(vmm_enter_guest)
	/* Save the hypctx address where save_guest_registers can find it */
	msr	tpidr_el2, x0

	save_host_registers
	restore_guest_registers

	/* Enter the guest */
	ERET
END(vmm_enter_guest)

/*
 * Usage:
 * void vmm_cleanup(uint64_t handle, void *hyp_stub_vectors)
 *
 * Reached from handle_el2_el1_sync64 when the host passes HYP_CLEANUP.
 *
 * Expecting:
 * x1 - physical address of hyp_stub_vectors
 */
LENTRY(vmm_cleanup)
	/* Restore the stub vectors */
	msr	vbar_el2, x1

	/* Disable the MMU */
	dsb	sy
	mrs	x2, sctlr_el2
	bic	x2, x2, #SCTLR_EL2_M
	msr	sctlr_el2, x2
	isb

	ERET
LEND(vmm_cleanup)