xref: /linux/arch/arm64/kernel/entry.S (revision 55f3538c4923e9dfca132e99ebec370e8094afda)
1/*
2 * Low-level exception handling code
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
6 *		Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/init.h>
22#include <linux/linkage.h>
23
24#include <asm/alternative.h>
25#include <asm/assembler.h>
26#include <asm/asm-offsets.h>
27#include <asm/cpufeature.h>
28#include <asm/errno.h>
29#include <asm/esr.h>
30#include <asm/irq.h>
31#include <asm/memory.h>
32#include <asm/mmu.h>
33#include <asm/processor.h>
34#include <asm/ptrace.h>
35#include <asm/thread_info.h>
36#include <asm/asm-uaccess.h>
37#include <asm/unistd.h>
38
39/*
40 * Context tracking subsystem.  Used to instrument transitions
41 * between user and kernel mode.
42 */
43	.macro ct_user_exit, syscall = 0
44#ifdef CONFIG_CONTEXT_TRACKING
45	bl	context_tracking_user_exit
46	.if \syscall == 1
47	/*
48	 * Save/restore needed during syscalls.  Restore syscall arguments from
49	 * the values already saved on stack during kernel_entry.
50	 */
51	ldp	x0, x1, [sp]
52	ldp	x2, x3, [sp, #S_X2]
53	ldp	x4, x5, [sp, #S_X4]
54	ldp	x6, x7, [sp, #S_X6]
55	.endif
56#endif
57	.endm
58
59	.macro ct_user_enter
60#ifdef CONFIG_CONTEXT_TRACKING
61	bl	context_tracking_user_enter
62#endif
63	.endm
64
65/*
66 * Bad Abort numbers
67 *-----------------
68 */
69#define BAD_SYNC	0
70#define BAD_IRQ		1
71#define BAD_FIQ		2
72#define BAD_ERROR	3
73
74	.macro kernel_ventry, el, label, regsize = 64
75	.align 7
76#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
77alternative_if ARM64_UNMAP_KERNEL_AT_EL0
78	.if	\el == 0
79	.if	\regsize == 64
80	mrs	x30, tpidrro_el0
81	msr	tpidrro_el0, xzr
82	.else
83	mov	x30, xzr
84	.endif
85	.endif
86alternative_else_nop_endif
87#endif
88
89	sub	sp, sp, #S_FRAME_SIZE
90#ifdef CONFIG_VMAP_STACK
91	/*
92	 * Test whether the SP has overflowed, without corrupting a GPR.
93	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT) is zero.
94	 */
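	/*
	 * The add/sub sequence below swaps sp and x0 so the candidate SP can
	 * be inspected without clobbering a GPR: after the second sub, x0
	 * holds the decremented SP while sp holds sp + x0. If bit
	 * THREAD_SHIFT is set we have run off the bottom of the stack and
	 * take the overflow path at 0f; otherwise both registers are
	 * restored and we branch to the real handler.
	 */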
95	add	sp, sp, x0			// sp' = sp + x0
96	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
97	tbnz	x0, #THREAD_SHIFT, 0f
98	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
99	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
100	b	el\()\el\()_\label
101
1020:
103	/*
104	 * Either we've just detected an overflow, or we've taken an exception
105	 * while on the overflow stack. Either way, we won't return to
106	 * userspace, and can clobber EL0 registers to free up GPRs.
107	 */
108
109	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
110	msr	tpidr_el0, x0
111
112	/* Recover the original x0 value and stash it in tpidrro_el0 */
113	sub	x0, sp, x0
114	msr	tpidrro_el0, x0
115
116	/* Switch to the overflow stack */
117	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
118
119	/*
120	 * Check whether we were already on the overflow stack. This may happen
121	 * after panic() re-enables interrupts.
122	 */
123	mrs	x0, tpidr_el0			// sp of interrupted context
124	sub	x0, sp, x0			// delta with top of overflow stack
125	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
126	b.ne	__bad_stack			// no? -> bad stack pointer
127
128	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
129	sub	sp, sp, x0
130	mrs	x0, tpidrro_el0
131#endif
132	b	el\()\el\()_\label
133	.endm
134
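/*
 * Compute the address of \sym as seen through the fixmap alias of
 * .entry.tramp.text (TRAMP_VALIAS), i.e. the mapping of the trampoline
 * that stays visible when the rest of the kernel is unmapped from the
 * user page tables.
 */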
135	.macro tramp_alias, dst, sym
136	mov_q	\dst, TRAMP_VALIAS
137	add	\dst, \dst, #(\sym - .entry.tramp.text)
138	.endm
139
140	.macro	kernel_entry, el, regsize = 64
141	.if	\regsize == 32
142	mov	w0, w0				// zero upper 32 bits of x0
143	.endif
144	stp	x0, x1, [sp, #16 * 0]
145	stp	x2, x3, [sp, #16 * 1]
146	stp	x4, x5, [sp, #16 * 2]
147	stp	x6, x7, [sp, #16 * 3]
148	stp	x8, x9, [sp, #16 * 4]
149	stp	x10, x11, [sp, #16 * 5]
150	stp	x12, x13, [sp, #16 * 6]
151	stp	x14, x15, [sp, #16 * 7]
152	stp	x16, x17, [sp, #16 * 8]
153	stp	x18, x19, [sp, #16 * 9]
154	stp	x20, x21, [sp, #16 * 10]
155	stp	x22, x23, [sp, #16 * 11]
156	stp	x24, x25, [sp, #16 * 12]
157	stp	x26, x27, [sp, #16 * 13]
158	stp	x28, x29, [sp, #16 * 14]
159
160	.if	\el == 0
161	mrs	x21, sp_el0
162	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
163	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
164	disable_step_tsk x19, x20		// exceptions when scheduling.
165
166	mov	x29, xzr			// fp pointed to user-space
167	.else
168	add	x21, sp, #S_FRAME_SIZE
169	get_thread_info tsk
170	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
171	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
172	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
173	mov	x20, #TASK_SIZE_64
174	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
175	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
176	.endif /* \el == 0 */
177	mrs	x22, elr_el1
178	mrs	x23, spsr_el1
179	stp	lr, x21, [sp, #S_LR]
180
181	/*
182	 * In order to be able to dump the contents of struct pt_regs at the
183	 * time the exception was taken (in case we attempt to walk the call
184	 * stack later), chain it together with the stack frames.
185	 */
186	.if \el == 0
187	stp	xzr, xzr, [sp, #S_STACKFRAME]
188	.else
189	stp	x29, x22, [sp, #S_STACKFRAME]
190	.endif
191	add	x29, sp, #S_STACKFRAME
192
193#ifdef CONFIG_ARM64_SW_TTBR0_PAN
194	/*
195	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
196	 * EL0, there is no need to check the state of TTBR0_EL1 since
197	 * accesses are always enabled.
198	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
199	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
200	 * user mappings.
201	 */
202alternative_if ARM64_HAS_PAN
203	b	1f				// skip TTBR0 PAN
204alternative_else_nop_endif
205
206	.if	\el != 0
207	mrs	x21, ttbr0_el1
208	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
209	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
210	b.eq	1f				// TTBR0 access already disabled
211	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
212	.endif
213
214	__uaccess_ttbr0_disable x21
2151:
216#endif
217
218	stp	x22, x23, [sp, #S_PC]
219
220	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
221	.if	\el == 0
222	mov	w21, #NO_SYSCALL
223	str	w21, [sp, #S_SYSCALLNO]
224	.endif
225
226	/*
227	 * Set sp_el0 to current thread_info.
228	 */
229	.if	\el == 0
230	msr	sp_el0, tsk
231	.endif
232
233	/*
234	 * Registers that may be useful after this macro is invoked:
235	 *
236	 * x21 - aborted SP
237	 * x22 - aborted PC
238	 * x23 - aborted PSTATE
239	 */
240	.endm
241
242	.macro	kernel_exit, el
243	.if	\el != 0
244	disable_daif
245
246	/* Restore the task's original addr_limit. */
247	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
248	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
249
250	/* No need to restore UAO, it will be restored from SPSR_EL1 */
251	.endif
252
253	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
254	.if	\el == 0
255	ct_user_enter
256	.endif
257
258#ifdef CONFIG_ARM64_SW_TTBR0_PAN
259	/*
260	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
261	 * PAN bit checking.
262	 */
263alternative_if ARM64_HAS_PAN
264	b	2f				// skip TTBR0 PAN
265alternative_else_nop_endif
266
267	.if	\el != 0
268	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
269	.endif
270
271	__uaccess_ttbr0_enable x0, x1
272
273	.if	\el == 0
274	/*
275	 * Enable errata workarounds only if returning to user. The only
276	 * workaround currently required for TTBR0_EL1 changes are for the
277	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
278	 * corruption).
279	 */
280	bl	post_ttbr_update_workaround
281	.endif
2821:
283	.if	\el != 0
284	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
285	.endif
2862:
287#endif
288
289	.if	\el == 0
290	ldr	x23, [sp, #S_SP]		// load return stack pointer
291	msr	sp_el0, x23
292	tst	x22, #PSR_MODE32_BIT		// native task?
293	b.eq	3f
294
295#ifdef CONFIG_ARM64_ERRATUM_845719
296alternative_if ARM64_WORKAROUND_845719
297#ifdef CONFIG_PID_IN_CONTEXTIDR
298	mrs	x29, contextidr_el1
299	msr	contextidr_el1, x29
300#else
301	msr	contextidr_el1, xzr
302#endif
303alternative_else_nop_endif
304#endif
3053:
306	.endif
307
308	msr	elr_el1, x21			// set up the return data
309	msr	spsr_el1, x22
310	ldp	x0, x1, [sp, #16 * 0]
311	ldp	x2, x3, [sp, #16 * 1]
312	ldp	x4, x5, [sp, #16 * 2]
313	ldp	x6, x7, [sp, #16 * 3]
314	ldp	x8, x9, [sp, #16 * 4]
315	ldp	x10, x11, [sp, #16 * 5]
316	ldp	x12, x13, [sp, #16 * 6]
317	ldp	x14, x15, [sp, #16 * 7]
318	ldp	x16, x17, [sp, #16 * 8]
319	ldp	x18, x19, [sp, #16 * 9]
320	ldp	x20, x21, [sp, #16 * 10]
321	ldp	x22, x23, [sp, #16 * 11]
322	ldp	x24, x25, [sp, #16 * 12]
323	ldp	x26, x27, [sp, #16 * 13]
324	ldp	x28, x29, [sp, #16 * 14]
325	ldr	lr, [sp, #S_LR]
326	add	sp, sp, #S_FRAME_SIZE		// restore sp
327
328	.if	\el == 0
329alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
330#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
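	/*
	 * When kpti is in effect the eret above is patched to a nop and we
	 * leave via the trampoline instead. The Z flag still reflects the
	 * PSR_MODE32_BIT test above (nothing in between sets the flags), so
	 * bne selects the compat exit; on the native path x30 is parked in
	 * FAR_EL1 for tramp_exit to restore once the kernel is unmapped.
	 */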
331	bne	4f
332	msr	far_el1, x30
333	tramp_alias	x30, tramp_exit_native
334	br	x30
3354:
336	tramp_alias	x30, tramp_exit_compat
337	br	x30
338#endif
339	.else
340	eret
341	.endif
342	.endm
343
344	.macro	irq_stack_entry
345	mov	x19, sp			// preserve the original sp
346
347	/*
348	 * Compare sp with the base of the task stack.
349	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
350	 * and should switch to the irq stack.
351	 */
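	/*
	 * For example, with 16K stacks the eor/and below leaves x25 == 0
	 * only if sp and the stack base agree in every bit above bit 13,
	 * i.e. sp still lies within the task stack; any other result means
	 * we are already off the task stack and the switch is skipped.
	 */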
352	ldr	x25, [tsk, TSK_STACK]
353	eor	x25, x25, x19
354	and	x25, x25, #~(THREAD_SIZE - 1)
355	cbnz	x25, 9998f
356
357	ldr_this_cpu x25, irq_stack_ptr, x26
358	mov	x26, #IRQ_STACK_SIZE
359	add	x26, x25, x26
360
361	/* switch to the irq stack */
362	mov	sp, x26
3639998:
364	.endm
365
366	/*
367	 * x19 should be preserved between irq_stack_entry and
368	 * irq_stack_exit.
369	 */
370	.macro	irq_stack_exit
371	mov	sp, x19
372	.endm
373
374/*
375 * These are the registers used in the syscall handler, and allow us to
376 * have in theory up to 7 arguments to a function - x0 to x6.
377 *
378 * x7 is reserved for the system call number in 32-bit mode.
379 */
380wsc_nr	.req	w25		// number of system calls
381wscno	.req	w26		// syscall number
382xscno	.req	x26		// syscall number (zero-extended)
383stbl	.req	x27		// syscall table pointer
384tsk	.req	x28		// current thread_info
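/*
 * These aliases are used by the el0_svc paths below: wscno is bounds-checked
 * against wsc_nr, the handler is fetched with "ldr x16, [stbl, xscno, lsl #3]",
 * and tsk survives calls into C code because x28 is a callee-saved register.
 */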
385
386/*
387 * Interrupt handling.
388 */
389	.macro	irq_handler
390	ldr_l	x1, handle_arch_irq
391	mov	x0, sp
392	irq_stack_entry
393	blr	x1
394	irq_stack_exit
395	.endm
396
397	.text
398
399/*
400 * Exception vectors.
401 */
402	.pushsection ".entry.text", "ax"
403
404	.align	11
405ENTRY(vectors)
406	kernel_ventry	1, sync_invalid			// Synchronous EL1t
407	kernel_ventry	1, irq_invalid			// IRQ EL1t
408	kernel_ventry	1, fiq_invalid			// FIQ EL1t
409	kernel_ventry	1, error_invalid		// Error EL1t
410
411	kernel_ventry	1, sync				// Synchronous EL1h
412	kernel_ventry	1, irq				// IRQ EL1h
413	kernel_ventry	1, fiq_invalid			// FIQ EL1h
414	kernel_ventry	1, error			// Error EL1h
415
416	kernel_ventry	0, sync				// Synchronous 64-bit EL0
417	kernel_ventry	0, irq				// IRQ 64-bit EL0
418	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
419	kernel_ventry	0, error			// Error 64-bit EL0
420
421#ifdef CONFIG_COMPAT
422	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
423	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
424	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
425	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
426#else
427	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
428	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
429	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
430	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
431#endif
432END(vectors)
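/*
 * This is the architectural vector layout: sixteen 128-byte entries
 * (hence the .align 7 in kernel_ventry), in four groups of four for
 * current EL with SP_EL0 (EL1t), current EL with SP_ELx (EL1h),
 * lower EL using AArch64 and lower EL using AArch32.
 */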
433
434#ifdef CONFIG_VMAP_STACK
435	/*
436	 * We detected an overflow in kernel_ventry, which switched to the
437	 * overflow stack. Stash the exception regs, and head to our overflow
438	 * handler.
439	 */
440__bad_stack:
441	/* Restore the original x0 value */
442	mrs	x0, tpidrro_el0
443
444	/*
445	 * Store the original GPRs to the new stack. The original SP (minus
446	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
447	 */
448	sub	sp, sp, #S_FRAME_SIZE
449	kernel_entry 1
450	mrs	x0, tpidr_el0
451	add	x0, x0, #S_FRAME_SIZE
452	str	x0, [sp, #S_SP]
453
454	/* Stash the regs for handle_bad_stack */
455	mov	x0, sp
456
457	/* Time to die */
458	bl	handle_bad_stack
459	ASM_BUG()
460#endif /* CONFIG_VMAP_STACK */
461
462/*
463 * Invalid mode handlers
464 */
465	.macro	inv_entry, el, reason, regsize = 64
466	kernel_entry \el, \regsize
467	mov	x0, sp
468	mov	x1, #\reason
469	mrs	x2, esr_el1
470	bl	bad_mode
471	ASM_BUG()
472	.endm
473
474el0_sync_invalid:
475	inv_entry 0, BAD_SYNC
476ENDPROC(el0_sync_invalid)
477
478el0_irq_invalid:
479	inv_entry 0, BAD_IRQ
480ENDPROC(el0_irq_invalid)
481
482el0_fiq_invalid:
483	inv_entry 0, BAD_FIQ
484ENDPROC(el0_fiq_invalid)
485
486el0_error_invalid:
487	inv_entry 0, BAD_ERROR
488ENDPROC(el0_error_invalid)
489
490#ifdef CONFIG_COMPAT
491el0_fiq_invalid_compat:
492	inv_entry 0, BAD_FIQ, 32
493ENDPROC(el0_fiq_invalid_compat)
494#endif
495
496el1_sync_invalid:
497	inv_entry 1, BAD_SYNC
498ENDPROC(el1_sync_invalid)
499
500el1_irq_invalid:
501	inv_entry 1, BAD_IRQ
502ENDPROC(el1_irq_invalid)
503
504el1_fiq_invalid:
505	inv_entry 1, BAD_FIQ
506ENDPROC(el1_fiq_invalid)
507
508el1_error_invalid:
509	inv_entry 1, BAD_ERROR
510ENDPROC(el1_error_invalid)
511
512/*
513 * EL1 mode handlers.
514 */
515	.align	6
516el1_sync:
517	kernel_entry 1
518	mrs	x1, esr_el1			// read the syndrome register
519	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
520	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
521	b.eq	el1_da
522	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
523	b.eq	el1_ia
524	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
525	b.eq	el1_undef
526	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
527	b.eq	el1_sp_pc
528	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
529	b.eq	el1_sp_pc
530	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
531	b.eq	el1_undef
532	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
533	b.ge	el1_dbg
534	b	el1_inv
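	/*
	 * The b.ge above relies on the ESR_ELx_EC encoding: every defined
	 * exception class at or above ESR_ELx_EC_BREAKPT_CUR (hardware
	 * breakpoint, software step, watchpoint, BRK) is debug-related, so
	 * one comparison routes them all to el1_dbg.
	 */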
535
536el1_ia:
537	/*
538	 * Fall through to the Data abort case
539	 */
540el1_da:
541	/*
542	 * Data abort handling
543	 */
544	mrs	x3, far_el1
545	inherit_daif	pstate=x23, tmp=x2
546	clear_address_tag x0, x3
547	mov	x2, sp				// struct pt_regs
548	bl	do_mem_abort
549
550	kernel_exit 1
551el1_sp_pc:
552	/*
553	 * Stack or PC alignment exception handling
554	 */
555	mrs	x0, far_el1
556	inherit_daif	pstate=x23, tmp=x2
557	mov	x2, sp
558	bl	do_sp_pc_abort
559	ASM_BUG()
560el1_undef:
561	/*
562	 * Undefined instruction
563	 */
564	inherit_daif	pstate=x23, tmp=x2
565	mov	x0, sp
566	bl	do_undefinstr
567	ASM_BUG()
568el1_dbg:
569	/*
570	 * Debug exception handling
571	 */
572	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
573	cinc	x24, x24, eq			// set bit '0'
574	tbz	x24, #0, el1_inv		// EL1 only
575	mrs	x0, far_el1
576	mov	x2, sp				// struct pt_regs
577	bl	do_debug_exception
578	kernel_exit 1
579el1_inv:
580	// TODO: add support for undefined instructions in kernel mode
581	inherit_daif	pstate=x23, tmp=x2
582	mov	x0, sp
583	mov	x2, x1
584	mov	x1, #BAD_SYNC
585	bl	bad_mode
586	ASM_BUG()
587ENDPROC(el1_sync)
588
589	.align	6
590el1_irq:
591	kernel_entry 1
592	enable_da_f
593#ifdef CONFIG_TRACE_IRQFLAGS
594	bl	trace_hardirqs_off
595#endif
596
597	irq_handler
598
599#ifdef CONFIG_PREEMPT
600	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
601	cbnz	w24, 1f				// preempt count != 0
602	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
603	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
604	bl	el1_preempt
6051:
606#endif
607#ifdef CONFIG_TRACE_IRQFLAGS
608	bl	trace_hardirqs_on
609#endif
610	kernel_exit 1
611ENDPROC(el1_irq)
612
613#ifdef CONFIG_PREEMPT
614el1_preempt:
615	mov	x24, lr
6161:	bl	preempt_schedule_irq		// irq en/disable is done inside
617	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
618	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
619	ret	x24
620#endif
621
622/*
623 * EL0 mode handlers.
624 */
625	.align	6
626el0_sync:
627	kernel_entry 0
628	mrs	x25, esr_el1			// read the syndrome register
629	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
630	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
631	b.eq	el0_svc
632	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
633	b.eq	el0_da
634	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
635	b.eq	el0_ia
636	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
637	b.eq	el0_fpsimd_acc
638	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
639	b.eq	el0_sve_acc
640	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
641	b.eq	el0_fpsimd_exc
642	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
643	b.eq	el0_sys
644	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
645	b.eq	el0_sp_pc
646	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
647	b.eq	el0_sp_pc
648	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
649	b.eq	el0_undef
650	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
651	b.ge	el0_dbg
652	b	el0_inv
653
654#ifdef CONFIG_COMPAT
655	.align	6
656el0_sync_compat:
657	kernel_entry 0, 32
658	mrs	x25, esr_el1			// read the syndrome register
659	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
660	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
661	b.eq	el0_svc_compat
662	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
663	b.eq	el0_da
664	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
665	b.eq	el0_ia
666	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
667	b.eq	el0_fpsimd_acc
668	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
669	b.eq	el0_fpsimd_exc
670	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
671	b.eq	el0_sp_pc
672	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
673	b.eq	el0_undef
674	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
675	b.eq	el0_undef
676	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
677	b.eq	el0_undef
678	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
679	b.eq	el0_undef
680	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
681	b.eq	el0_undef
682	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
683	b.eq	el0_undef
684	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
685	b.ge	el0_dbg
686	b	el0_inv
687el0_svc_compat:
688	/*
689	 * AArch32 syscall handling
690	 */
691	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
692	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
693	mov	wscno, w7			// syscall number in w7 (r7)
694	mov	wsc_nr, #__NR_compat_syscalls
695	b	el0_svc_naked
696
697	.align	6
698el0_irq_compat:
699	kernel_entry 0, 32
700	b	el0_irq_naked
701
702el0_error_compat:
703	kernel_entry 0, 32
704	b	el0_error_naked
705#endif
706
707el0_da:
708	/*
709	 * Data abort handling
710	 */
711	mrs	x26, far_el1
712	enable_daif
713	ct_user_exit
714	clear_address_tag x0, x26
715	mov	x1, x25
716	mov	x2, sp
717	bl	do_mem_abort
718	b	ret_to_user
719el0_ia:
720	/*
721	 * Instruction abort handling
722	 */
723	mrs	x26, far_el1
724	enable_da_f
725#ifdef CONFIG_TRACE_IRQFLAGS
726	bl	trace_hardirqs_off
727#endif
728	ct_user_exit
729	mov	x0, x26
730	mov	x1, x25
731	mov	x2, sp
732	bl	do_el0_ia_bp_hardening
733	b	ret_to_user
734el0_fpsimd_acc:
735	/*
736	 * Floating Point or Advanced SIMD access
737	 */
738	enable_daif
739	ct_user_exit
740	mov	x0, x25
741	mov	x1, sp
742	bl	do_fpsimd_acc
743	b	ret_to_user
744el0_sve_acc:
745	/*
746	 * Scalable Vector Extension access
747	 */
748	enable_daif
749	ct_user_exit
750	mov	x0, x25
751	mov	x1, sp
752	bl	do_sve_acc
753	b	ret_to_user
754el0_fpsimd_exc:
755	/*
756	 * Floating Point, Advanced SIMD or SVE exception
757	 */
758	enable_daif
759	ct_user_exit
760	mov	x0, x25
761	mov	x1, sp
762	bl	do_fpsimd_exc
763	b	ret_to_user
764el0_sp_pc:
765	/*
766	 * Stack or PC alignment exception handling
767	 */
768	mrs	x26, far_el1
769	enable_daif
770	ct_user_exit
771	mov	x0, x26
772	mov	x1, x25
773	mov	x2, sp
774	bl	do_sp_pc_abort
775	b	ret_to_user
776el0_undef:
777	/*
778	 * Undefined instruction
779	 */
780	enable_daif
781	ct_user_exit
782	mov	x0, sp
783	bl	do_undefinstr
784	b	ret_to_user
785el0_sys:
786	/*
787	 * System instructions, for trapped cache maintenance instructions
788	 */
789	enable_daif
790	ct_user_exit
791	mov	x0, x25
792	mov	x1, sp
793	bl	do_sysinstr
794	b	ret_to_user
795el0_dbg:
796	/*
797	 * Debug exception handling
798	 */
799	tbnz	x24, #0, el0_inv		// EL0 only
800	mrs	x0, far_el1
801	mov	x1, x25
802	mov	x2, sp
803	bl	do_debug_exception
804	enable_daif
805	ct_user_exit
806	b	ret_to_user
807el0_inv:
808	enable_daif
809	ct_user_exit
810	mov	x0, sp
811	mov	x1, #BAD_SYNC
812	mov	x2, x25
813	bl	bad_el0_sync
814	b	ret_to_user
815ENDPROC(el0_sync)
816
817	.align	6
818el0_irq:
819	kernel_entry 0
820el0_irq_naked:
821	enable_da_f
822#ifdef CONFIG_TRACE_IRQFLAGS
823	bl	trace_hardirqs_off
824#endif
825
826	ct_user_exit
827	irq_handler
828
829#ifdef CONFIG_TRACE_IRQFLAGS
830	bl	trace_hardirqs_on
831#endif
832	b	ret_to_user
833ENDPROC(el0_irq)
834
835el1_error:
836	kernel_entry 1
837	mrs	x1, esr_el1
838	enable_dbg
839	mov	x0, sp
840	bl	do_serror
841	kernel_exit 1
842ENDPROC(el1_error)
843
844el0_error:
845	kernel_entry 0
846el0_error_naked:
847	mrs	x1, esr_el1
848	enable_dbg
849	mov	x0, sp
850	bl	do_serror
851	enable_daif
852	ct_user_exit
853	b	ret_to_user
854ENDPROC(el0_error)
855
856
857/*
858 * This is the fast syscall return path.  We do as little as possible here,
859 * and this includes saving x0 back into the kernel stack.
860 */
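/*
 * If any _TIF_SYSCALL_WORK flag is set (tracing, audit, etc.) we must still
 * run syscall_trace_exit, so branch to the trace return path with x0 already
 * saved; _TIF_WORK_MASK flags divert to work_pending instead.
 */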
861ret_fast_syscall:
862	disable_daif
863	str	x0, [sp, #S_X0]			// returned x0
864	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
865	and	x2, x1, #_TIF_SYSCALL_WORK
866	cbnz	x2, ret_fast_syscall_trace
867	and	x2, x1, #_TIF_WORK_MASK
868	cbnz	x2, work_pending
869	enable_step_tsk x1, x2
870	kernel_exit 0
871ret_fast_syscall_trace:
872	enable_daif
873	b	__sys_trace_return_skipped	// we already saved x0
874
875/*
876 * Ok, we need to do extra processing; enter the slow path.
877 */
878work_pending:
879	mov	x0, sp				// 'regs'
880	bl	do_notify_resume
881#ifdef CONFIG_TRACE_IRQFLAGS
882	bl	trace_hardirqs_on		// enabled while in userspace
883#endif
884	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
885	b	finish_ret_to_user
886/*
887 * "slow" syscall return path.
888 */
889ret_to_user:
890	disable_daif
891	ldr	x1, [tsk, #TSK_TI_FLAGS]
892	and	x2, x1, #_TIF_WORK_MASK
893	cbnz	x2, work_pending
894finish_ret_to_user:
895	enable_step_tsk x1, x2
896	kernel_exit 0
897ENDPROC(ret_to_user)
898
899/*
900 * SVC handler.
901 */
902	.align	6
903el0_svc:
904	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
905	adrp	stbl, sys_call_table		// load syscall table pointer
906	mov	wscno, w8			// syscall number in w8
907	mov	wsc_nr, #__NR_syscalls
908
909#ifdef CONFIG_ARM64_SVE
910alternative_if_not ARM64_SVE
911	b	el0_svc_naked
912alternative_else_nop_endif
913	tbz	x16, #TIF_SVE, el0_svc_naked	// Skip unless TIF_SVE set:
914	bic	x16, x16, #_TIF_SVE		// discard SVE state
915	str	x16, [tsk, #TSK_TI_FLAGS]
916
917	/*
918	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
919	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
920	 * happens if a context switch or kernel_neon_begin() or context
921	 * modification (sigreturn, ptrace) intervenes.
922	 * So, ensure that CPACR_EL1 is already correct for the fast-path case:
923	 */
924	mrs	x9, cpacr_el1
925	bic	x9, x9, #CPACR_EL1_ZEN_EL0EN	// disable SVE for el0
926	msr	cpacr_el1, x9			// synchronised by eret to el0
927#endif
928
929el0_svc_naked:					// compat entry point
930	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
931	enable_daif
932	ct_user_exit 1
933
934	tst	x16, #_TIF_SYSCALL_WORK		// check for syscall hooks
935	b.ne	__sys_trace
936	cmp	wscno, wsc_nr			// check upper syscall limit
937	b.hs	ni_sys
938	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
939	blr	x16				// call sys_* routine
940	b	ret_fast_syscall
941ni_sys:
942	mov	x0, sp
943	bl	do_ni_syscall
944	b	ret_fast_syscall
945ENDPROC(el0_svc)
946
947	/*
948	 * This is the really slow path.  We're going to be doing context
949	 * switches, and waiting for our parent to respond.
950	 */
951__sys_trace:
952	cmp	wscno, #NO_SYSCALL		// user-issued syscall(-1)?
953	b.ne	1f
954	mov	x0, #-ENOSYS			// set default errno if so
955	str	x0, [sp, #S_X0]
9561:	mov	x0, sp
957	bl	syscall_trace_enter
958	cmp	w0, #NO_SYSCALL			// skip the syscall?
959	b.eq	__sys_trace_return_skipped
960	mov	wscno, w0			// syscall number (possibly new)
961	mov	x1, sp				// pointer to regs
962	cmp	wscno, wsc_nr			// check upper syscall limit
963	b.hs	__ni_sys_trace
964	ldp	x0, x1, [sp]			// restore the syscall args
965	ldp	x2, x3, [sp, #S_X2]
966	ldp	x4, x5, [sp, #S_X4]
967	ldp	x6, x7, [sp, #S_X6]
968	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
969	blr	x16				// call sys_* routine
970
971__sys_trace_return:
972	str	x0, [sp, #S_X0]			// save returned x0
973__sys_trace_return_skipped:
974	mov	x0, sp
975	bl	syscall_trace_exit
976	b	ret_to_user
977
978__ni_sys_trace:
979	mov	x0, sp
980	bl	do_ni_syscall
981	b	__sys_trace_return
982
983	.popsection				// .entry.text
984
985#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
986/*
987 * Exception vectors trampoline.
988 */
989	.pushsection ".entry.tramp.text", "ax"
990
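/*
 * tramp_map_kernel points TTBR1_EL1 back at the full kernel page tables:
 * swapper_pg_dir is placed PAGE_SIZE + RESERVED_TTBR0_SIZE after the
 * trampoline page table, and clearing USER_ASID_FLAG moves us from the user
 * ASID to the kernel ASID so the two sets of TLB entries stay separate.
 * tramp_unmap_kernel below does the reverse on the way back out to userspace.
 */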
991	.macro tramp_map_kernel, tmp
992	mrs	\tmp, ttbr1_el1
993	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
994	bic	\tmp, \tmp, #USER_ASID_FLAG
995	msr	ttbr1_el1, \tmp
996#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
997alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
998	/* ASID already in \tmp[63:48] */
999	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
1000	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
1001	/* 2MB boundary containing the vectors, so we nobble the walk cache */
1002	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
1003	isb
1004	tlbi	vae1, \tmp
1005	dsb	nsh
1006alternative_else_nop_endif
1007#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
1008	.endm
1009
1010	.macro tramp_unmap_kernel, tmp
1011	mrs	\tmp, ttbr1_el1
1012	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
1013	orr	\tmp, \tmp, #USER_ASID_FLAG
1014	msr	ttbr1_el1, \tmp
1015	/*
1016	 * We avoid running the post_ttbr_update_workaround here because the
1017	 * user and kernel ASIDs don't have conflicting mappings, so any
1018	 * "blessing" as described in:
1019	 *
1020	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
1021	 *
1022	 * will not hurt correctness. Whilst this may partially defeat the
1023	 * point of using split ASIDs in the first place, it avoids
1024	 * the hit of invalidating the entire I-cache on every return to
1025	 * userspace.
1026	 */
1027	.endm
1028
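/*
 * Each trampoline vector stashes x30 in TPIDRRO_EL0 (64-bit entries only;
 * kernel_ventry restores it), maps the kernel, loads the address of the real
 * vectors, points VBAR_EL1 at them and returns into the vector entry at the
 * matching offset. With RANDOMIZE_BASE the address of vectors is read from a
 * literal in the page mapped after the trampoline text instead of being
 * patched into this page.
 */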
1029	.macro tramp_ventry, regsize = 64
1030	.align	7
10311:
1032	.if	\regsize == 64
1033	msr	tpidrro_el0, x30	// Restored in kernel_ventry
1034	.endif
1035	/*
1036	 * Defend against branch aliasing attacks by pushing a dummy
1037	 * entry onto the return stack and using a RET instruction to
1038	 * enter the full-fat kernel vectors.
1039	 */
1040	bl	2f
1041	b	.
10422:
1043	tramp_map_kernel	x30
1044#ifdef CONFIG_RANDOMIZE_BASE
1045	adr	x30, tramp_vectors + PAGE_SIZE
1046alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1047	ldr	x30, [x30]
1048#else
1049	ldr	x30, =vectors
1050#endif
1051	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
1052	msr	vbar_el1, x30
1053	add	x30, x30, #(1b - tramp_vectors)
1054	isb
1055	ret
1056	.endm
1057
1058	.macro tramp_exit, regsize = 64
1059	adr	x30, tramp_vectors
1060	msr	vbar_el1, x30
1061	tramp_unmap_kernel	x30
1062	.if	\regsize == 64
1063	mrs	x30, far_el1
1064	.endif
1065	eret
1066	.endm
1067
1068	.align	11
1069ENTRY(tramp_vectors)
1070	.space	0x400
1071
1072	tramp_ventry
1073	tramp_ventry
1074	tramp_ventry
1075	tramp_ventry
1076
1077	tramp_ventry	32
1078	tramp_ventry	32
1079	tramp_ventry	32
1080	tramp_ventry	32
1081END(tramp_vectors)
1082
1083ENTRY(tramp_exit_native)
1084	tramp_exit
1085END(tramp_exit_native)
1086
1087ENTRY(tramp_exit_compat)
1088	tramp_exit	32
1089END(tramp_exit_compat)
1090
1091	.ltorg
1092	.popsection				// .entry.tramp.text
1093#ifdef CONFIG_RANDOMIZE_BASE
1094	.pushsection ".rodata", "a"
1095	.align PAGE_SHIFT
1096	.globl	__entry_tramp_data_start
1097__entry_tramp_data_start:
1098	.quad	vectors
1099	.popsection				// .rodata
1100#endif /* CONFIG_RANDOMIZE_BASE */
1101#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1102
1103/*
1104 * Special system call wrappers.
1105 */
1106ENTRY(sys_rt_sigreturn_wrapper)
1107	mov	x0, sp
1108	b	sys_rt_sigreturn
1109ENDPROC(sys_rt_sigreturn_wrapper)
1110
1111/*
1112 * Register switch for AArch64. The callee-saved registers need to be saved
1113 * and restored. On entry:
1114 *   x0 = previous task_struct (must be preserved across the switch)
1115 *   x1 = next task_struct
1116 * Previous and next are guaranteed not to be the same.
1117 *
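 * The switch stores x19-x28, fp, sp and lr into prev->thread.cpu_context,
 * loads the same set for next, and finally updates sp_el0 so that the
 * entry code's current task pointer (tsk) refers to the next task.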
1118 */
1119ENTRY(cpu_switch_to)
1120	mov	x10, #THREAD_CPU_CONTEXT
1121	add	x8, x0, x10
1122	mov	x9, sp
1123	stp	x19, x20, [x8], #16		// store callee-saved registers
1124	stp	x21, x22, [x8], #16
1125	stp	x23, x24, [x8], #16
1126	stp	x25, x26, [x8], #16
1127	stp	x27, x28, [x8], #16
1128	stp	x29, x9, [x8], #16
1129	str	lr, [x8]
1130	add	x8, x1, x10
1131	ldp	x19, x20, [x8], #16		// restore callee-saved registers
1132	ldp	x21, x22, [x8], #16
1133	ldp	x23, x24, [x8], #16
1134	ldp	x25, x26, [x8], #16
1135	ldp	x27, x28, [x8], #16
1136	ldp	x29, x9, [x8], #16
1137	ldr	lr, [x8]
1138	mov	sp, x9
1139	msr	sp_el0, x1
1140	ret
1141ENDPROC(cpu_switch_to)
1142NOKPROBE(cpu_switch_to)
1143
1144/*
1145 * This is how we return from a fork.
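 * copy_thread() seeds the callee-saved registers that cpu_switch_to will
 * restore: for a kernel thread x19 holds the thread function and x20 its
 * argument, while for a user task x19 is zero and we fall straight through
 * to ret_to_user.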
1146 */
1147ENTRY(ret_from_fork)
1148	bl	schedule_tail
1149	cbz	x19, 1f				// not a kernel thread
1150	mov	x0, x20
1151	blr	x19
11521:	get_thread_info tsk
1153	b	ret_to_user
1154ENDPROC(ret_from_fork)
1155NOKPROBE(ret_from_fork)
1156
1157#ifdef CONFIG_ARM_SDE_INTERFACE
1158
1159#include <asm/sdei.h>
1160#include <uapi/linux/arm_sdei.h>
1161
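/*
 * Tell firmware that handling of the SDEI event is complete, using whichever
 * conduit (SMC or HVC) the caller passes in \exit_mode; this is the value the
 * SDEI driver recorded in sdei_exit_mode. On success firmware resumes the
 * interrupted context, so the call does not return.
 */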
1162.macro sdei_handler_exit exit_mode
1163	/* On success, this call never returns... */
1164	cmp	\exit_mode, #SDEI_EXIT_SMC
1165	b.ne	99f
1166	smc	#0
1167	b	.
116899:	hvc	#0
1169	b	.
1170.endm
1171
1172#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1173/*
1174 * The regular SDEI entry point may have been unmapped along with the rest of
1175 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1176 * argument accessible.
1177 *
1178 * This clobbers x4; __sdei_handler() will restore it from firmware's
1179 * copy.
1180 */
1181.ltorg
1182.pushsection ".entry.tramp.text", "ax"
1183ENTRY(__sdei_asm_entry_trampoline)
1184	mrs	x4, ttbr1_el1
1185	tbz	x4, #USER_ASID_BIT, 1f
1186
1187	tramp_map_kernel tmp=x4
1188	isb
1189	mov	x4, xzr
1190
1191	/*
1192	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1193	 * the kernel on exit.
1194	 */
11951:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1196
1197#ifdef CONFIG_RANDOMIZE_BASE
1198	adr	x4, tramp_vectors + PAGE_SIZE
1199	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1200	ldr	x4, [x4]
1201#else
1202	ldr	x4, =__sdei_asm_handler
1203#endif
1204	br	x4
1205ENDPROC(__sdei_asm_entry_trampoline)
1206NOKPROBE(__sdei_asm_entry_trampoline)
1207
1208/*
1209 * Make the exit call and restore the original ttbr1_el1
1210 *
1211 * x0 & x1: setup for the exit API call
1212 * x2: exit_mode
1213 * x4: struct sdei_registered_event argument from registration time.
1214 */
1215ENTRY(__sdei_asm_exit_trampoline)
1216	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1217	cbnz	x4, 1f
1218
1219	tramp_unmap_kernel	tmp=x4
1220
12211:	sdei_handler_exit exit_mode=x2
1222ENDPROC(__sdei_asm_exit_trampoline)
1223NOKPROBE(__sdei_asm_exit_trampoline)
1224	.ltorg
1225.popsection		// .entry.tramp.text
1226#ifdef CONFIG_RANDOMIZE_BASE
1227.pushsection ".rodata", "a"
1228__sdei_asm_trampoline_next_handler:
1229	.quad	__sdei_asm_handler
1230.popsection		// .rodata
1231#endif /* CONFIG_RANDOMIZE_BASE */
1232#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1233
1234/*
1235 * Software Delegated Exception entry point.
1236 *
1237 * x0: Event number
1238 * x1: struct sdei_registered_event argument from registration time.
1239 * x2: interrupted PC
1240 * x3: interrupted PSTATE
1241 * x4: maybe clobbered by the trampoline
1242 *
1243 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
1244 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
1245 * want them.
1246 */
1247ENTRY(__sdei_asm_handler)
1248	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1249	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1250	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1251	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1252	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1253	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1254	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1255	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1256	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1257	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1258	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1259	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1260	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1261	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1262	mov	x4, sp
1263	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1264
1265	mov	x19, x1
1266
1267#ifdef CONFIG_VMAP_STACK
1268	/*
1269	 * entry.S may have been using sp as a scratch register, so find whether
1270	 * this is a normal or critical event and switch to the appropriate
1271	 * stack for this CPU.
1272	 */
1273	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1274	cbnz	w4, 1f
1275	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1276	b	2f
12771:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
12782:	mov	x6, #SDEI_STACK_SIZE
1279	add	x5, x5, x6
1280	mov	sp, x5
1281#endif
1282
1283	/*
1284	 * We may have interrupted userspace, or a guest, or exit-from or
1285	 * return-to either of these. We can't trust sp_el0, so restore it.
1286	 */
1287	mrs	x28, sp_el0
1288	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1289	msr	sp_el0, x0
1290
1291	/* If we interrupted the kernel, point to the previous stack/frame. */
1292	and     x0, x3, #0xc
1293	mrs     x1, CurrentEL
1294	cmp     x0, x1
1295	csel	x29, x29, xzr, eq	// fp, or zero
1296	csel	x4, x2, xzr, eq		// elr, or zero
1297
1298	stp	x29, x4, [sp, #-16]!
1299	mov	x29, sp
1300
1301	add	x0, x19, #SDEI_EVENT_INTREGS
1302	mov	x1, x19
1303	bl	__sdei_handler
1304
1305	msr	sp_el0, x28
1306	/* restore regs >x17 that we clobbered */
1307	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1308	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1309	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1310	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1311	mov	sp, x1
1312
1313	mov	x1, x0			// address to complete_and_resume
1314	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1315	cmp	x0, #1
1316	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1317	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1318	csel	x0, x2, x3, ls
1319
1320	ldr_l	x2, sdei_exit_mode
1321
1322alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1323	sdei_handler_exit exit_mode=x2
1324alternative_else_nop_endif
1325
1326#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1327	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1328	br	x5
1329#endif
1330ENDPROC(__sdei_asm_handler)
1331NOKPROBE(__sdei_asm_handler)
1332#endif /* CONFIG_ARM_SDE_INTERFACE */
1333