xref: /freebsd/sys/arm64/arm64/exception.S (revision 99213b3c352cdf568ea7cf5b4dddb1879f16a601)
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/elf_common.h>

#include <machine/asm.h>
#include <machine/armreg.h>

#include "assym.inc"
#include <sys/intr.h>

	.text

/*
 * This is limited to 28 instructions as it's placed in the exception vector
 * slot that is 32 instructions long. We need one for the branch, and three
 * for the trailing dsb/isb/brk sequence.
 */
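/*
 * Save enough state to later build a struct trapframe: x0-x29, the
 * interrupted context's stack pointer and lr, and elr_el1, spsr_el1,
 * esr_el1 and far_el1.  On exit x18 points at the per-CPU data
 * (tpidr_el1) for the code that follows.  As of this revision each
 * expansion is 24 instructions, within the 28-instruction budget
 * described above.
 */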
.macro	save_registers_head el
.if \el == 1
	stp	x0,  x1,  [sp, #-(TF_SIZE - TF_X + 128)]!
.else
	stp	x0,  x1,  [sp, #-(TF_SIZE - TF_X)]!
.endif
	stp	x2,  x3,  [sp, #(2  * 8)]
	stp	x4,  x5,  [sp, #(4  * 8)]
	stp	x6,  x7,  [sp, #(6  * 8)]
	stp	x8,  x9,  [sp, #(8  * 8)]
	stp	x10, x11, [sp, #(10 * 8)]
	stp	x12, x13, [sp, #(12 * 8)]
	stp	x14, x15, [sp, #(14 * 8)]
	stp	x16, x17, [sp, #(16 * 8)]
	stp	x18, x19, [sp, #(18 * 8)]
	stp	x20, x21, [sp, #(20 * 8)]
	stp	x22, x23, [sp, #(22 * 8)]
	stp	x24, x25, [sp, #(24 * 8)]
	stp	x26, x27, [sp, #(26 * 8)]
	stp	x28, x29, [sp, #(28 * 8)]
.if \el == 1
	add	x18, sp, #(TF_SIZE - TF_X + 128)
.else
	mrs	x18, sp_el0
.endif
	mrs	x10, elr_el1
	mrs	x11, spsr_el1
	mrs	x12, esr_el1
	mrs	x13, far_el1
	stp	x18,  lr, [sp, #(TF_SP - TF_X)]!
	stp	x10, x11, [sp, #(TF_ELR)]
	stp	x12, x13, [sp, #(TF_ESR)]
	mrs	x18, tpidr_el1
.endm

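/*
 * Finish exception entry after save_registers_head: point x29 just above the
 * trapframe and, when entering from EL0, point sp_el0 at the per-thread SSP
 * canary (PERTHREAD_SSP), call the per-CPU SSBD (CVE-2018-3639) workaround
 * handler if one is installed, switch to the kernel pointer authentication
 * keys and enable the kernel debug monitor before unmasking debug and SError
 * exceptions.
 */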
.macro	save_registers el
	add	x29, sp, #(TF_SIZE)
.if \el == 0
#if defined(PERTHREAD_SSP)
	/* Load the SSP canary to sp_el0 */
	ldr	x1, [x18, #(PC_CURTHREAD)]
	add	x1, x1, #(TD_MD_CANARY)
	msr	sp_el0, x1
#endif

	/* Apply the SSBD (CVE-2018-3639) workaround if needed */
	ldr	x1, [x18, #PC_SSBD]
	cbz	x1, 1f
	mov	w0, #1
	blr	x1
1:

	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_exit_el0

	ldr	x0, [x18, #(PC_CURTHREAD)]
	bl	dbg_monitor_enter

	/* Unmask debug and SError exceptions */
	msr	daifclr, #(DAIF_D | DAIF_A)
.else
	/*
	 * Unmask debug and SError exceptions.
	 * For EL1, debug exceptions are conditionally unmasked in
	 * do_el1h_sync().
	 */
	msr	daifclr, #(DAIF_A)
.endif
.endm

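/*
 * Undo the work done by save_registers on the way out of an exception: with
 * all exceptions masked, tear down the EL0-only state (debug monitor, pointer
 * authentication keys, SSBD workaround) and reload the interrupted context's
 * registers from the trapframe.  Callee-saved registers are only reloaded
 * when returning to EL0; for EL1 returns the stack pointer is recovered from
 * the frame and x18 is reloaded from tpidr_el1.
 */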
.macro	restore_registers el
	/*
	 * Mask all exceptions, x18 may change in the interrupt exception
	 * handler.
	 */
	msr	daifset, #(DAIF_ALL)
.if \el == 0
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	dbg_monitor_exit

	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_enter_el0

	/* Remove the SSBD (CVE-2018-3639) workaround if needed */
	ldr	x1, [x18, #PC_SSBD]
	cbz	x1, 1f
	mov	w0, #0
	blr	x1
1:
.endif
	ldp	x18,  lr, [sp, #(TF_SP)]
	ldp	x10, x11, [sp, #(TF_ELR)]
.if \el == 0
	msr	sp_el0, x18
.endif
	msr	spsr_el1, x11
	msr	elr_el1, x10
	ldp	x0,  x1,  [sp, #(TF_X + 0  * 8)]
	ldp	x2,  x3,  [sp, #(TF_X + 2  * 8)]
	ldp	x4,  x5,  [sp, #(TF_X + 4  * 8)]
	ldp	x6,  x7,  [sp, #(TF_X + 6  * 8)]
	ldp	x8,  x9,  [sp, #(TF_X + 8  * 8)]
	ldp	x10, x11, [sp, #(TF_X + 10 * 8)]
	ldp	x12, x13, [sp, #(TF_X + 12 * 8)]
	ldp	x14, x15, [sp, #(TF_X + 14 * 8)]
	ldp	x16, x17, [sp, #(TF_X + 16 * 8)]
.if \el == 0
	/*
	 * We only restore the callee saved registers when returning to
	 * userland as they may have been updated by a system call or signal.
	 */
	ldp	x18, x19, [sp, #(TF_X + 18 * 8)]
	ldp	x20, x21, [sp, #(TF_X + 20 * 8)]
	ldp	x22, x23, [sp, #(TF_X + 22 * 8)]
	ldp	x24, x25, [sp, #(TF_X + 24 * 8)]
	ldp	x26, x27, [sp, #(TF_X + 26 * 8)]
	ldp	x28, x29, [sp, #(TF_X + 28 * 8)]
.else
	ldr	     x29, [sp, #(TF_X + 29 * 8)]
.endif
.if \el == 0
	add	sp, sp, #(TF_SIZE)
.else
	mov	sp, x18
	mrs	x18, tpidr_el1
.endif
.endm

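/*
 * Check for pending ASTs on the current thread before returning to userland.
 * Interrupts are masked while TD_AST is tested; if it is non-zero the saved
 * DAIF value (with IRQ/FIQ enabled) is restored, ast() is called with the
 * trapframe, and the check is repeated.  The macro exits with interrupts
 * still masked.
 */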
.macro	do_ast
	mrs	x19, daif
	/* Make sure the IRQs are enabled before calling ast() */
	bic	x19, x19, #(PSR_I | PSR_F)
1:
	/*
	 * Mask interrupts while checking the ast pending flag
	 */
	msr	daifset, #(DAIF_INTR)

	/* Read the current thread AST mask */
	ldr	x1, [x18, #PC_CURTHREAD]	/* Load curthread */
	ldr	w1, [x1, #(TD_AST)]

	/* Check if we have a non-zero AST mask */
	cbz	w1, 2f

	/* Restore interrupts */
	msr	daif, x19

	/* handle the ast */
	mov	x0, sp
	bl	_C_LABEL(ast)

	/* Re-check for new ast scheduled */
	b	1b
2:
.endm

#ifdef KMSAN
/*
 * The KMSAN runtime relies on a TLS block to track initialization and origin
 * state for function parameters and return values.  To keep this state
 * consistent in the face of asynchronous kernel-mode traps, the runtime
 * maintains a stack of blocks: when handling an exception or interrupt,
 * kmsan_intr_enter() pushes the new block to be used until the handler is
 * complete, at which point kmsan_intr_leave() restores the previous block.
 *
 * Thus, KMSAN_ENTER/LEAVE hooks are required only in handlers for events that
 * may have happened while in kernel-mode.  In particular, they are not required
 * around svc_handler() or ast() calls.  Otherwise, kmsan_intr_enter() can be
 * called unconditionally, without distinguishing between entry from user-mode
 * or kernel-mode.
 */
#define	KMSAN_ENTER	bl kmsan_intr_enter
#define	KMSAN_LEAVE	bl kmsan_intr_leave
#else
#define	KMSAN_ENTER
#define	KMSAN_LEAVE
#endif
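/*
 * Each handler below brackets its C-level work with KMSAN_ENTER/KMSAN_LEAVE;
 * when the kernel is built without KMSAN the macros expand to nothing.
 */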

ENTRY(handle_el1h_sync)
	save_registers 1
	KMSAN_ENTER
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	do_el1h_sync
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_sync)

ENTRY(handle_el1h_irq)
	save_registers 1
	KMSAN_ENTER
	mov	x0, sp
	mov	x1, #INTR_ROOT_IRQ
	bl	intr_irq_handler
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_irq)

ENTRY(handle_el1h_fiq)
	save_registers 1
	KMSAN_ENTER
	mov	x0, sp
	mov	x1, #INTR_ROOT_FIQ
	bl	intr_irq_handler
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_fiq)

ENTRY(handle_el1h_serror)
	save_registers 1
	KMSAN_ENTER
	mov	x0, sp
1:	bl	do_serror
	b	1b
	KMSAN_LEAVE
END(handle_el1h_serror)

ENTRY(handle_el0_sync)
	save_registers 0
	KMSAN_ENTER
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	mov	x22, x0
	str	x1, [x0, #TD_FRAME]
	bl	do_el0_sync
	do_ast
	str	xzr, [x22, #TD_FRAME]
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_sync)

ENTRY(handle_el0_irq)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
	mov	x1, #INTR_ROOT_IRQ
	bl	intr_irq_handler
	do_ast
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_irq)

ENTRY(handle_el0_fiq)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
	mov	x1, #INTR_ROOT_FIQ
	bl	intr_irq_handler
	do_ast
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_fiq)

ENTRY(handle_el0_serror)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
1:	bl	do_serror
	b	1b
	KMSAN_LEAVE
END(handle_el0_serror)

ENTRY(handle_empty_exception)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
1:	bl	unhandled_exception
	b	1b
	KMSAN_LEAVE
END(handle_empty_exception)

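/*
 * Build one 32-instruction (128-byte) vector table entry: save the
 * interrupted register state inline, then branch to the matching handle_*
 * routine above.  The trailing dsb/isb/brk sequence is never reached in
 * normal operation.
 */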
.macro	vector	name, el
	.align 7
	save_registers_head \el
	b	handle_\name
	dsb	sy
	isb
	/* Break instruction to ensure we aren't executing code here. */
	brk	0x42
.endm

.macro	vempty el
	vector	empty_exception \el
.endm

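/*
 * The vector table itself: 16 slots of 128 bytes, 2KiB-aligned as required
 * for VBAR_EL1.  The four groups cover exceptions taken from the current EL
 * with SP_EL0, the current EL with SP_ELx, and lower ELs in AArch64 and
 * AArch32 state, each ordered Synchronous, IRQ, FIQ, SError.
 */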
	.align 11
	.globl exception_vectors
exception_vectors:
	vempty 1		/* Synchronous EL1t */
	vempty 1		/* IRQ EL1t */
	vempty 1		/* FIQ EL1t */
	vempty 1		/* Error EL1t */

	vector el1h_sync 1	/* Synchronous EL1h */
	vector el1h_irq 1	/* IRQ EL1h */
	vector el1h_fiq 1	/* FIQ EL1h */
	vector el1h_serror 1	/* Error EL1h */

	vector el0_sync 0	/* Synchronous 64-bit EL0 */
	vector el0_irq 0	/* IRQ 64-bit EL0 */
	vector el0_fiq 0	/* FIQ 64-bit EL0 */
	vector el0_serror 0	/* Error 64-bit EL0 */

	vector el0_sync 0	/* Synchronous 32-bit EL0 */
	vector el0_irq 0	/* IRQ 32-bit EL0 */
	vector el0_fiq 0	/* FIQ 32-bit EL0 */
	vector el0_serror 0	/* Error 32-bit EL0 */

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)