xref: /linux/arch/arm64/kernel/entry.S (revision 460e8c3340a265d1d70fa1ee05c42afd68b2efa0)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
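
/*
 * These values are handed to bad_mode() as the 'reason' argument by the
 * inv_entry macro and the el0/el1 invalid-mode stubs below.
 */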

	.macro kernel_ventry	label
	.align 7
	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
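	/*
	 * The stacks are sized and aligned so that bit THREAD_SHIFT of any
	 * valid SP is clear; if it is set after the S_FRAME_SIZE subtraction
	 * above, SP has run off the bottom of the stack.
	 */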
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	\label
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr0_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
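	/*
	 * SPSR_EL1.M[4] (bit 4 of x22) is only set when returning to a
	 * 32-bit task; the workaround below is only applied in that case.
	 */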
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on eret context synchronization
	 * when returning from an IPI handler and when returning to user-space.
	 */
	eret					// return to kernel or user space
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
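	/*
	 * 9998 is reached directly when sp was not on the task stack, i.e.
	 * we were already on the IRQ or overflow stack, in which case sp is
	 * left untouched.
	 */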
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
wsc_nr	.req	w25		// number of system calls
wscno	.req	w26		// syscall number
xscno	.req	x26		// syscall number (zero-extended)
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
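
/*
 * These .req aliases remain in effect for the rest of this file; in
 * particular, tsk is set up by kernel_entry and relied upon by kernel_exit
 * and the ret_to_user/ret_fast_syscall paths.
 */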

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	kernel_ventry	el1_sync_invalid		// Synchronous EL1t
	kernel_ventry	el1_irq_invalid			// IRQ EL1t
	kernel_ventry	el1_fiq_invalid			// FIQ EL1t
	kernel_ventry	el1_error_invalid		// Error EL1t

	kernel_ventry	el1_sync			// Synchronous EL1h
	kernel_ventry	el1_irq				// IRQ EL1h
	kernel_ventry	el1_fiq_invalid			// FIQ EL1h
	kernel_ventry	el1_error			// Error EL1h

	kernel_ventry	el0_sync			// Synchronous 64-bit EL0
	kernel_ventry	el0_irq				// IRQ 64-bit EL0
	kernel_ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	el0_error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	el0_sync_compat			// Synchronous 32-bit EL0
	kernel_ventry	el0_irq_compat			// IRQ 32-bit EL0
	kernel_ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	kernel_ventry	el0_error_compat		// Error 32-bit EL0
#else
	kernel_ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	kernel_ventry	el0_irq_invalid			// IRQ 32-bit EL0
	kernel_ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	kernel_ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	inherit_daif	pstate=x23, tmp=x2
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	inherit_daif	pstate=x23, tmp=x2
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	bl	do_undefinstr
	ASM_BUG()
el1_dbg:
	/*
	 * Debug exception handling
	 */
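	/*
	 * The debug exception classes taken from the current EL have odd EC
	 * values (BREAKPT_CUR, SOFTSTP_CUR, WATCHPT_CUR); BRK64 is the one
	 * even-valued class we also handle here, hence the cinc below before
	 * testing bit 0.
	 */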
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
	b.eq	el0_sve_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	mov	wscno, w7			// syscall number in w7 (r7)
	mov	wsc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked

el0_error_compat:
	kernel_entry 0, 32
	b	el0_error_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	enable_daif
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	enable_daif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_sve_acc:
	/*
	 * Scalable Vector Extension access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sve_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point, Advanced SIMD or SVE exception
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	enable_daif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	enable_daif
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_daif
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_daif
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

el1_error:
	kernel_entry 1
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
ENDPROC(el1_error)

el0_error:
	kernel_entry 0
el0_error_naked:
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	enable_daif
	ct_user_exit
	b	ret_to_user
ENDPROC(el0_error)


/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_daif
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_daif
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_daif
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
	adrp	stbl, sys_call_table		// load syscall table pointer
	mov	wscno, w8			// syscall number in w8
	mov	wsc_nr, #__NR_syscalls

#ifdef CONFIG_ARM64_SVE
alternative_if_not ARM64_SVE
	b	el0_svc_naked
alternative_else_nop_endif
	tbz	x16, #TIF_SVE, el0_svc_naked	// Skip unless TIF_SVE set:
	bic	x16, x16, #_TIF_SVE		// discard SVE state
	str	x16, [tsk, #TSK_TI_FLAGS]

	/*
	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
	 * happens if a context switch or kernel_neon_begin() or context
	 * modification (sigreturn, ptrace) intervenes.
	 * So, ensure that CPACR_EL1 is already correct for the fast-path case:
	 */
	mrs	x9, cpacr_el1
	bic	x9, x9, #CPACR_EL1_ZEN_EL0EN	// disable SVE for el0
	msr	cpacr_el1, x9			// synchronised by eret to el0
#endif

el0_svc_naked:					// compat entry point
	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_daif
	ct_user_exit 1

	tst	x16, #_TIF_SYSCALL_WORK		// check for syscall hooks
	b.ne	__sys_trace
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	ni_sys
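	/*
	 * Each syscall table entry is a 64-bit pointer, so index the table
	 * with the zero-extended syscall number scaled by 8 (lsl #3).
	 */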
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	cmp	wscno, #NO_SYSCALL		// user-issued syscall(-1)?
	b.ne	1f
	mov	x0, #-ENOSYS			// set default errno if so
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #NO_SYSCALL			// skip the syscall?
	b.eq	__sys_trace_return_skipped
	mov	wscno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
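/*
 * sys_rt_sigreturn() needs the saved user pt_regs, which kernel_entry left
 * at the current sp, so the wrapper simply passes sp as the first argument.
 */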
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
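/*
 * Only the callee-saved registers, sp and lr need switching here: the
 * caller-saved registers may be clobbered across the call per the AAPCS.
 * sp_el0 is updated as well, since it holds the current task_struct pointer
 * (see get_thread_info).
 */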
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
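/*
 * x19 and x20 come from the new task's cpu_context, set up by copy_thread():
 * x19 is non-zero only for kernel threads and then holds the thread function,
 * with its argument in x20; user tasks fall straight through to ret_to_user.
 */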
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
