xref: /freebsd/sys/arm64/arm64/exception.S (revision d1e843b3f976528fbea04e702a20219d532220d3)
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/elf_common.h>

#include <machine/asm.h>
#include <machine/armreg.h>

#include "assym.inc"
#include <sys/intr.h>

	.text

/*
 * This is limited to 28 instructions as it's placed in the exception vector
 * slot that is 32 instructions long. We need one instruction for the branch
 * to the handler and three for the trailing dsb/isb/brk guard.
 */
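/*
 * Save the interrupted register state into a struct trapframe built on
 * the stack: x0-x29, the pre-exception stack pointer (sp for EL1,
 * sp_el0 for EL0) and lr, plus the ELR, SPSR, ESR and FAR registers.
 * On exit x18 holds tpidr_el1, the per-CPU data pointer.
 */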
.macro	save_registers_head el
.if \el == 1
	mov	x18, sp
	stp	x0,  x1,  [sp, #(TF_X - TF_SIZE - 128)]!
.else
	stp	x0,  x1,  [sp, #(TF_X - TF_SIZE)]!
.endif
	stp	x2,  x3,  [sp, #(2  * 8)]
	stp	x4,  x5,  [sp, #(4  * 8)]
	stp	x6,  x7,  [sp, #(6  * 8)]
	stp	x8,  x9,  [sp, #(8  * 8)]
	stp	x10, x11, [sp, #(10 * 8)]
	stp	x12, x13, [sp, #(12 * 8)]
	stp	x14, x15, [sp, #(14 * 8)]
	stp	x16, x17, [sp, #(16 * 8)]
	stp	x18, x19, [sp, #(18 * 8)]
	stp	x20, x21, [sp, #(20 * 8)]
	stp	x22, x23, [sp, #(22 * 8)]
	stp	x24, x25, [sp, #(24 * 8)]
	stp	x26, x27, [sp, #(26 * 8)]
	stp	x28, x29, [sp, #(28 * 8)]
.if \el == 0
	mrs	x18, sp_el0
.endif
	mrs	x10, elr_el1
	mrs	x11, spsr_el1
	mrs	x12, esr_el1
	mrs	x13, far_el1
	stp	x18,  lr, [sp, #(TF_SP - TF_X)]!
	stp	x10, x11, [sp, #(TF_ELR)]
	stp	x12, x13, [sp, #(TF_ESR)]
	mrs	x18, tpidr_el1
.endm

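/*
 * Complete the exception entry started by save_registers_head: set the
 * frame pointer above the trapframe and, when entered from EL0, load
 * the per-thread SSP canary (if PERTHREAD_SSP), apply the SSBD
 * workaround, switch pointer authentication and the debug monitor to
 * kernel state, and unmask debug and SError exceptions.
 */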
.macro	save_registers el
	add	x29, sp, #(TF_SIZE)
.if \el == 0
#if defined(PERTHREAD_SSP)
	/* Load the SSP canary to sp_el0 */
	ldr	x1, [x18, #(PC_CURTHREAD)]
	add	x1, x1, #(TD_MD_CANARY)
	msr	sp_el0, x1
#endif

	/* Apply the SSBD (CVE-2018-3639) workaround if needed */
	ldr	x1, [x18, #PC_SSBD]
	cbz	x1, 1f
	mov	w0, #1
	blr	x1
1:

	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_exit_el0

	ldr	x0, [x18, #(PC_CURTHREAD)]
	bl	dbg_monitor_enter

	/* Unmask debug and SError exceptions */
	msr	daifclr, #(DAIF_D | DAIF_A)
.else
	/*
	 * Unmask debug and SError exceptions.
	 * For EL1, debug exceptions are conditionally unmasked in
	 * do_el1h_sync().
	 */
	msr	daifclr, #(DAIF_A)
.endif
.endm

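/*
 * Undo save_registers/save_registers_head before the exception return:
 * with all exceptions masked, restore the EL0 debug, pointer
 * authentication and SSBD state when returning to userland, write back
 * ELR/SPSR, reload the saved registers from the trapframe and restore
 * the stack pointer.  Callee-saved registers are only reloaded when
 * returning to EL0.
 */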
.macro	restore_registers el
	/*
	 * Mask all exceptions; x18 may change in the interrupt
	 * exception handler.
	 */
	msr	daifset, #(DAIF_ALL)
.if \el == 0
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	dbg_monitor_exit

	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_enter_el0

	/* Remove the SSBD (CVE-2018-3639) workaround if needed */
	ldr	x1, [x18, #PC_SSBD]
	cbz	x1, 1f
	mov	w0, #0
	blr	x1
1:
.endif
	ldp	x18,  lr, [sp, #(TF_SP)]
	ldp	x10, x11, [sp, #(TF_ELR)]
.if \el == 0
	msr	sp_el0, x18
.endif
	msr	spsr_el1, x11
	msr	elr_el1, x10
	ldp	x0,  x1,  [sp, #(TF_X + 0  * 8)]
	ldp	x2,  x3,  [sp, #(TF_X + 2  * 8)]
	ldp	x4,  x5,  [sp, #(TF_X + 4  * 8)]
	ldp	x6,  x7,  [sp, #(TF_X + 6  * 8)]
	ldp	x8,  x9,  [sp, #(TF_X + 8  * 8)]
	ldp	x10, x11, [sp, #(TF_X + 10 * 8)]
	ldp	x12, x13, [sp, #(TF_X + 12 * 8)]
	ldp	x14, x15, [sp, #(TF_X + 14 * 8)]
	ldp	x16, x17, [sp, #(TF_X + 16 * 8)]
.if \el == 0
	/*
	 * We only restore the callee saved registers when returning to
	 * userland as they may have been updated by a system call or signal.
	 */
	ldp	x18, x19, [sp, #(TF_X + 18 * 8)]
	ldp	x20, x21, [sp, #(TF_X + 20 * 8)]
	ldp	x22, x23, [sp, #(TF_X + 22 * 8)]
	ldp	x24, x25, [sp, #(TF_X + 24 * 8)]
	ldp	x26, x27, [sp, #(TF_X + 26 * 8)]
	ldp	x28, x29, [sp, #(TF_X + 28 * 8)]
.else
	ldr	     x29, [sp, #(TF_X + 29 * 8)]
.endif
.if \el == 0
	add	sp, sp, #(TF_SIZE)
.else
	mov	sp, x18
	mrs	x18, tpidr_el1
.endif
.endm

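/*
 * Loop calling ast() while the current thread has pending ASTs.  The
 * check of td_ast is made with interrupts masked so that no new AST can
 * be posted between the final check and the return to userland.
 */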
.macro	do_ast
	mrs	x19, daif
	/* Make sure the IRQs are enabled before calling ast() */
	bic	x19, x19, #(PSR_I | PSR_F)
1:
	/*
	 * Mask interrupts while checking the ast pending flag
	 */
	msr	daifset, #(DAIF_INTR)

	/* Read the current thread AST mask */
	ldr	x1, [x18, #PC_CURTHREAD]	/* Load curthread */
	ldr	w1, [x1, #(TD_AST)]

	/* Check if we have a non-zero AST mask */
	cbz	w1, 2f

	/* Restore interrupts */
	msr	daif, x19

	/* handle the ast */
	mov	x0, sp
	bl	_C_LABEL(ast)

	/* Re-check for new ast scheduled */
	b	1b
2:
.endm

#ifdef KMSAN
/*
 * The KMSAN runtime relies on a TLS block to track initialization and origin
 * state for function parameters and return values.  To keep this state
 * consistent in the face of asynchronous kernel-mode traps, the runtime
 * maintains a stack of blocks: when handling an exception or interrupt,
 * kmsan_intr_enter() pushes the new block to be used until the handler is
 * complete, at which point kmsan_intr_leave() restores the previous block.
 *
 * Thus, KMSAN_ENTER/LEAVE hooks are required only in handlers for events that
 * may have happened while in kernel-mode.  In particular, they are not required
 * around svc_handler() or ast() calls.  Otherwise, kmsan_intr_enter() can be
 * called unconditionally, without distinguishing between entry from user-mode
 * or kernel-mode.
 */
#define	KMSAN_ENTER	bl kmsan_intr_enter
#define	KMSAN_LEAVE	bl kmsan_intr_leave
#else
#define	KMSAN_ENTER
#define	KMSAN_LEAVE
#endif

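/*
 * Handlers for exceptions taken from the kernel (EL1 with SP_EL1): save
 * the register state, dispatch to the C handler and return with ERET.
 */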
ENTRY(handle_el1h_sync)
	save_registers 1
	KMSAN_ENTER
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	do_el1h_sync
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_sync)

ENTRY(handle_el1h_irq)
	save_registers 1
	KMSAN_ENTER
	mov	x0, sp
	mov	x1, #INTR_ROOT_IRQ
	bl	intr_irq_handler
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_irq)

ENTRY(handle_el1h_fiq)
	save_registers 1
	KMSAN_ENTER
	mov	x0, sp
	mov	x1, #INTR_ROOT_FIQ
	bl	intr_irq_handler
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_fiq)

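/*
 * do_serror() is not expected to return; the branch back to the call is
 * a guard in case it does.
 */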
ENTRY(handle_el1h_serror)
	save_registers 1
	KMSAN_ENTER
	mov	x0, sp
1:	bl	do_serror
	b	1b
	KMSAN_LEAVE
END(handle_el1h_serror)

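/*
 * Handlers for exceptions taken from userland (EL0).  handle_el0_sync
 * records the trapframe in curthread's td_frame; the synchronous and
 * interrupt handlers check for pending ASTs before returning to EL0.
 */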
ENTRY(handle_el0_sync)
	save_registers 0
	KMSAN_ENTER
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	mov	x22, x0
	str	x1, [x0, #TD_FRAME]
	bl	do_el0_sync
	do_ast
	str	xzr, [x22, #TD_FRAME]
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_sync)

ENTRY(handle_el0_irq)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
	mov	x1, #INTR_ROOT_IRQ
	bl	intr_irq_handler
	do_ast
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_irq)

ENTRY(handle_el0_fiq)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
	mov	x1, #INTR_ROOT_FIQ
	bl	intr_irq_handler
	do_ast
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_fiq)

ENTRY(handle_el0_serror)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
1:	bl	do_serror
	b	1b
	KMSAN_LEAVE
END(handle_el0_serror)

ENTRY(handle_empty_exception)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
1:	bl	unhandled_exception
	b	1b
	KMSAN_LEAVE
END(handle_empty_exception)

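/*
 * Build a single exception vector entry.  Each slot is 32 instructions
 * (128 bytes, hence .align 7): the register save sequence, a branch to
 * the handler, and a dsb/isb/brk tail that should never be reached.
 */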
.macro	vector	name, el
	.align 7
	save_registers_head \el
	b	handle_\name
	dsb	sy
	isb
	/* Break instruction to ensure we aren't executing code here. */
	brk	0x42
.endm

.macro	vempty el
	vector	empty_exception \el
.endm

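/*
 * The exception vector table loaded into VBAR_EL1, 2KB aligned, with
 * four entries per group: exceptions from EL1 with SP_EL0 (unexpected,
 * routed to handle_empty_exception), EL1 with SP_EL1, 64-bit EL0 and
 * 32-bit EL0.
 */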
	.align 11
	.globl exception_vectors
exception_vectors:
	vempty 1		/* Synchronous EL1t */
	vempty 1		/* IRQ EL1t */
	vempty 1		/* FIQ EL1t */
	vempty 1		/* Error EL1t */

	vector el1h_sync 1	/* Synchronous EL1h */
	vector el1h_irq 1	/* IRQ EL1h */
	vector el1h_fiq 1	/* FIQ EL1h */
	vector el1h_serror 1	/* Error EL1h */

	vector el0_sync 0	/* Synchronous 64-bit EL0 */
	vector el0_irq 0	/* IRQ 64-bit EL0 */
	vector el0_fiq 0	/* FIQ 64-bit EL0 */
	vector el0_serror 0	/* Error 64-bit EL0 */

	vector el0_sync 0	/* Synchronous 32-bit EL0 */
	vector el0_irq 0	/* IRQ 32-bit EL0 */
	vector el0_fiq 0	/* FIQ 32-bit EL0 */
	vector el0_serror 0	/* Error 32-bit EL0 */

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)