xref: /linux/arch/arm64/kernel/entry.S (revision ecdf06e1ea5376bba03c155751f6869d3dfaa210)
1/*
2 * Low-level exception handling code
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
6 *		Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/init.h>
22#include <linux/linkage.h>
23
24#include <asm/alternative.h>
25#include <asm/assembler.h>
26#include <asm/asm-offsets.h>
27#include <asm/cpufeature.h>
28#include <asm/errno.h>
29#include <asm/esr.h>
30#include <asm/irq.h>
31#include <asm/memory.h>
32#include <asm/mmu.h>
33#include <asm/processor.h>
34#include <asm/ptrace.h>
35#include <asm/thread_info.h>
36#include <asm/asm-uaccess.h>
37#include <asm/unistd.h>
38
39/*
40 * Context tracking subsystem.  Used to instrument transitions
41 * between user and kernel mode.
42 */
43	.macro ct_user_exit, syscall = 0
44#ifdef CONFIG_CONTEXT_TRACKING
45	bl	context_tracking_user_exit
46	.if \syscall == 1
47	/*
48	 * Save/restore needed during syscalls.  Restore syscall arguments from
49	 * the values already saved on stack during kernel_entry.
50	 */
51	ldp	x0, x1, [sp]
52	ldp	x2, x3, [sp, #S_X2]
53	ldp	x4, x5, [sp, #S_X4]
54	ldp	x6, x7, [sp, #S_X6]
55	.endif
56#endif
57	.endm
58
59	.macro ct_user_enter
60#ifdef CONFIG_CONTEXT_TRACKING
61	bl	context_tracking_user_enter
62#endif
63	.endm
64
65/*
66 * Bad Abort numbers
67 *-----------------
68 */
69#define BAD_SYNC	0
70#define BAD_IRQ		1
71#define BAD_FIQ		2
72#define BAD_ERROR	3
73
74	.macro kernel_ventry, el, label, regsize = 64
75	.align 7
76#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
77alternative_if ARM64_UNMAP_KERNEL_AT_EL0
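	/*
	 * EL0 entries come in via the kpti trampoline (tramp_ventry below),
	 * which stashed the user x30 in tpidrro_el0; recover it here and
	 * scrub the register. For 32-bit EL0 nothing was stashed, so x30 is
	 * simply zeroed.
	 */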
78	.if	\el == 0
79	.if	\regsize == 64
80	mrs	x30, tpidrro_el0
81	msr	tpidrro_el0, xzr
82	.else
83	mov	x30, xzr
84	.endif
85	.endif
86alternative_else_nop_endif
87#endif
88
89	sub	sp, sp, #S_FRAME_SIZE
90#ifdef CONFIG_VMAP_STACK
91	/*
92	 * Test whether the SP has overflowed, without corrupting a GPR.
93	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
94	 */
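	/*
	 * The add/sub pair below recovers the (already decremented) SP into
	 * x0 without needing a scratch register. If bit THREAD_SHIFT of that
	 * value is set, the pt_regs frame no longer fits on the current stack
	 * and we take the overflow path at 0: below; otherwise the original
	 * x0 is recovered, SP is left pointing at the new frame, and we
	 * branch to the normal handler.
	 */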
95	add	sp, sp, x0			// sp' = sp + x0
96	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
97	tbnz	x0, #THREAD_SHIFT, 0f
98	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
99	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
100	b	el\()\el\()_\label
101
1020:
103	/*
104	 * Either we've just detected an overflow, or we've taken an exception
105	 * while on the overflow stack. Either way, we won't return to
106	 * userspace, and can clobber EL0 registers to free up GPRs.
107	 */
108
109	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
110	msr	tpidr_el0, x0
111
112	/* Recover the original x0 value and stash it in tpidrro_el0 */
113	sub	x0, sp, x0
114	msr	tpidrro_el0, x0
115
116	/* Switch to the overflow stack */
117	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
118
119	/*
120	 * Check whether we were already on the overflow stack. This may happen
121	 * after panic() re-enables interrupts.
122	 */
123	mrs	x0, tpidr_el0			// sp of interrupted context
124	sub	x0, sp, x0			// delta with top of overflow stack
125	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
126	b.ne	__bad_stack			// no? -> bad stack pointer
127
128	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
129	sub	sp, sp, x0
130	mrs	x0, tpidrro_el0
131#endif
132	b	el\()\el\()_\label
133	.endm
134
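/*
 * Compute the address of \sym as seen through the trampoline alias mapping
 * at TRAMP_VALIAS, so the code can still be reached once the rest of the
 * kernel is hidden from the EL0 page tables.
 */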
135	.macro tramp_alias, dst, sym
136	mov_q	\dst, TRAMP_VALIAS
137	add	\dst, \dst, #(\sym - .entry.tramp.text)
138	.endm
139
140	.macro	kernel_entry, el, regsize = 64
141	.if	\regsize == 32
142	mov	w0, w0				// zero upper 32 bits of x0
143	.endif
144	stp	x0, x1, [sp, #16 * 0]
145	stp	x2, x3, [sp, #16 * 1]
146	stp	x4, x5, [sp, #16 * 2]
147	stp	x6, x7, [sp, #16 * 3]
148	stp	x8, x9, [sp, #16 * 4]
149	stp	x10, x11, [sp, #16 * 5]
150	stp	x12, x13, [sp, #16 * 6]
151	stp	x14, x15, [sp, #16 * 7]
152	stp	x16, x17, [sp, #16 * 8]
153	stp	x18, x19, [sp, #16 * 9]
154	stp	x20, x21, [sp, #16 * 10]
155	stp	x22, x23, [sp, #16 * 11]
156	stp	x24, x25, [sp, #16 * 12]
157	stp	x26, x27, [sp, #16 * 13]
158	stp	x28, x29, [sp, #16 * 14]
159
160	.if	\el == 0
161	mrs	x21, sp_el0
162	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
163	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
164	disable_step_tsk x19, x20		// exceptions when scheduling.
165
166	mov	x29, xzr			// fp pointed to user-space
167	.else
168	add	x21, sp, #S_FRAME_SIZE
169	get_thread_info tsk
170	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
171	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
172	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
173	mov	x20, #TASK_SIZE_64
174	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
175	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
176	.endif /* \el == 0 */
177	mrs	x22, elr_el1
178	mrs	x23, spsr_el1
179	stp	lr, x21, [sp, #S_LR]
180
181	/*
182	 * In order to be able to dump the contents of struct pt_regs at the
183	 * time the exception was taken (in case we attempt to walk the call
184	 * stack later), chain it together with the stack frames.
185	 */
186	.if \el == 0
187	stp	xzr, xzr, [sp, #S_STACKFRAME]
188	.else
189	stp	x29, x22, [sp, #S_STACKFRAME]
190	.endif
191	add	x29, sp, #S_STACKFRAME
192
193#ifdef CONFIG_ARM64_SW_TTBR0_PAN
194	/*
195	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
196	 * EL0, there is no need to check the state of TTBR0_EL1 since
197	 * accesses are always enabled.
198	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
199	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
200	 * user mappings.
201	 */
202alternative_if ARM64_HAS_PAN
203	b	1f				// skip TTBR0 PAN
204alternative_else_nop_endif
205
206	.if	\el != 0
207	mrs	x21, ttbr0_el1
208	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
209	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
210	b.eq	1f				// TTBR0 access already disabled
211	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
212	.endif
213
214	__uaccess_ttbr0_disable x21
2151:
216#endif
217
218	stp	x22, x23, [sp, #S_PC]
219
220	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
221	.if	\el == 0
222	mov	w21, #NO_SYSCALL
223	str	w21, [sp, #S_SYSCALLNO]
224	.endif
225
226	/*
227	 * Set sp_el0 to current thread_info.
228	 */
229	.if	\el == 0
230	msr	sp_el0, tsk
231	.endif
232
233	/*
234	 * Registers that may be useful after this macro is invoked:
235	 *
236	 * x21 - aborted SP
237	 * x22 - aborted PC
238	 * x23 - aborted PSTATE
239	 */
240	.endm
241
242	.macro	kernel_exit, el
243	.if	\el != 0
244	disable_daif
245
246	/* Restore the task's original addr_limit. */
247	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
248	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
249
250	/* No need to restore UAO, it will be restored from SPSR_EL1 */
251	.endif
252
253	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
254	.if	\el == 0
255	ct_user_enter
256	.endif
257
258#ifdef CONFIG_ARM64_SW_TTBR0_PAN
259	/*
260	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
261	 * PAN bit checking.
262	 */
263alternative_if ARM64_HAS_PAN
264	b	2f				// skip TTBR0 PAN
265alternative_else_nop_endif
266
267	.if	\el != 0
268	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
269	.endif
270
271	__uaccess_ttbr0_enable x0, x1
272
273	.if	\el == 0
274	/*
275	 * Enable errata workarounds only if returning to user. The only
276	 * workaround currently required for TTBR0_EL1 changes is for the
277	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
278	 * corruption).
279	 */
280	bl	post_ttbr_update_workaround
281	.endif
2821:
283	.if	\el != 0
284	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
285	.endif
2862:
287#endif
288
289	.if	\el == 0
290	ldr	x23, [sp, #S_SP]		// load return stack pointer
291	msr	sp_el0, x23
292	tst	x22, #PSR_MODE32_BIT		// native task?
293	b.eq	3f
294
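	/*
	 * Cortex-A53 erratum 845719 workaround: before returning to a 32-bit
	 * EL0 task, CONTEXTIDR_EL1 must be written (rewriting the PID value
	 * when CONFIG_PID_IN_CONTEXTIDR is set, or zero otherwise).
	 */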
295#ifdef CONFIG_ARM64_ERRATUM_845719
296alternative_if ARM64_WORKAROUND_845719
297#ifdef CONFIG_PID_IN_CONTEXTIDR
298	mrs	x29, contextidr_el1
299	msr	contextidr_el1, x29
300#else
301	msr contextidr_el1, xzr
302#endif
303alternative_else_nop_endif
304#endif
3053:
306	.endif
307
308	msr	elr_el1, x21			// set up the return data
309	msr	spsr_el1, x22
310	ldp	x0, x1, [sp, #16 * 0]
311	ldp	x2, x3, [sp, #16 * 1]
312	ldp	x4, x5, [sp, #16 * 2]
313	ldp	x6, x7, [sp, #16 * 3]
314	ldp	x8, x9, [sp, #16 * 4]
315	ldp	x10, x11, [sp, #16 * 5]
316	ldp	x12, x13, [sp, #16 * 6]
317	ldp	x14, x15, [sp, #16 * 7]
318	ldp	x16, x17, [sp, #16 * 8]
319	ldp	x18, x19, [sp, #16 * 9]
320	ldp	x20, x21, [sp, #16 * 10]
321	ldp	x22, x23, [sp, #16 * 11]
322	ldp	x24, x25, [sp, #16 * 12]
323	ldp	x26, x27, [sp, #16 * 13]
324	ldp	x28, x29, [sp, #16 * 14]
325	ldr	lr, [sp, #S_LR]
326	add	sp, sp, #S_FRAME_SIZE		// restore sp
327	/*
328	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on eret context synchronization
329	 * when returning from an IPI handler and when returning to user-space.
330	 */
331
332	.if	\el == 0
333alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
334#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
335	bne	4f
336	msr	far_el1, x30
337	tramp_alias	x30, tramp_exit_native
338	br	x30
3394:
340	tramp_alias	x30, tramp_exit_compat
341	br	x30
342#endif
343	.else
344	eret
345	.endif
346	.endm
347
348	.macro	irq_stack_entry
349	mov	x19, sp			// preserve the original sp
350
351	/*
352	 * Compare sp with the base of the task stack.
353	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
354	 * and should switch to the irq stack.
355	 */
356	ldr	x25, [tsk, TSK_STACK]
357	eor	x25, x25, x19
358	and	x25, x25, #~(THREAD_SIZE - 1)
359	cbnz	x25, 9998f
360
361	ldr_this_cpu x25, irq_stack_ptr, x26
362	mov	x26, #IRQ_STACK_SIZE
363	add	x26, x25, x26
364
365	/* switch to the irq stack */
366	mov	sp, x26
3679998:
368	.endm
369
370	/*
371	 * x19 should be preserved between irq_stack_entry and
372	 * irq_stack_exit.
373	 */
374	.macro	irq_stack_exit
375	mov	sp, x19
376	.endm
377
378/*
379 * These are the registers used in the syscall handler, and allow us to
380 * have in theory up to 7 arguments to a function - x0 to x6.
381 *
382 * x7 is reserved for the system call number in 32-bit mode.
383 */
384wsc_nr	.req	w25		// number of system calls
385wscno	.req	w26		// syscall number
386xscno	.req	x26		// syscall number (zero-extended)
387stbl	.req	x27		// syscall table pointer
388tsk	.req	x28		// current thread_info
389
390/*
391 * Interrupt handling.
392 */
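/*
 * handle_arch_irq is the root handler installed by the interrupt controller
 * driver via set_handle_irq(); it is called with x0 pointing at the pt_regs
 * frame on the interrupted stack, after irq_stack_entry has switched SP to
 * this CPU's IRQ stack where appropriate.
 */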
393	.macro	irq_handler
394	ldr_l	x1, handle_arch_irq
395	mov	x0, sp
396	irq_stack_entry
397	blr	x1
398	irq_stack_exit
399	.endm
400
401	.text
402
403/*
404 * Exception vectors.
405 */
406	.pushsection ".entry.text", "ax"
407
408	.align	11
409ENTRY(vectors)
410	kernel_ventry	1, sync_invalid			// Synchronous EL1t
411	kernel_ventry	1, irq_invalid			// IRQ EL1t
412	kernel_ventry	1, fiq_invalid			// FIQ EL1t
413	kernel_ventry	1, error_invalid		// Error EL1t
414
415	kernel_ventry	1, sync				// Synchronous EL1h
416	kernel_ventry	1, irq				// IRQ EL1h
417	kernel_ventry	1, fiq_invalid			// FIQ EL1h
418	kernel_ventry	1, error			// Error EL1h
419
420	kernel_ventry	0, sync				// Synchronous 64-bit EL0
421	kernel_ventry	0, irq				// IRQ 64-bit EL0
422	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
423	kernel_ventry	0, error			// Error 64-bit EL0
424
425#ifdef CONFIG_COMPAT
426	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
427	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
428	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
429	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
430#else
431	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
432	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
433	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
434	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
435#endif
436END(vectors)
437
438#ifdef CONFIG_VMAP_STACK
439	/*
440	 * We detected an overflow in kernel_ventry, which switched to the
441	 * overflow stack. Stash the exception regs, and head to our overflow
442	 * handler.
443	 */
444__bad_stack:
445	/* Restore the original x0 value */
446	mrs	x0, tpidrro_el0
447
448	/*
449	 * Store the original GPRs to the new stack. The original SP (minus
450	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
451	 */
452	sub	sp, sp, #S_FRAME_SIZE
453	kernel_entry 1
454	mrs	x0, tpidr_el0
455	add	x0, x0, #S_FRAME_SIZE
456	str	x0, [sp, #S_SP]
457
458	/* Stash the regs for handle_bad_stack */
459	mov	x0, sp
460
461	/* Time to die */
462	bl	handle_bad_stack
463	ASM_BUG()
464#endif /* CONFIG_VMAP_STACK */
465
466/*
467 * Invalid mode handlers
468 */
469	.macro	inv_entry, el, reason, regsize = 64
470	kernel_entry \el, \regsize
471	mov	x0, sp
472	mov	x1, #\reason
473	mrs	x2, esr_el1
474	bl	bad_mode
475	ASM_BUG()
476	.endm
477
478el0_sync_invalid:
479	inv_entry 0, BAD_SYNC
480ENDPROC(el0_sync_invalid)
481
482el0_irq_invalid:
483	inv_entry 0, BAD_IRQ
484ENDPROC(el0_irq_invalid)
485
486el0_fiq_invalid:
487	inv_entry 0, BAD_FIQ
488ENDPROC(el0_fiq_invalid)
489
490el0_error_invalid:
491	inv_entry 0, BAD_ERROR
492ENDPROC(el0_error_invalid)
493
494#ifdef CONFIG_COMPAT
495el0_fiq_invalid_compat:
496	inv_entry 0, BAD_FIQ, 32
497ENDPROC(el0_fiq_invalid_compat)
498#endif
499
500el1_sync_invalid:
501	inv_entry 1, BAD_SYNC
502ENDPROC(el1_sync_invalid)
503
504el1_irq_invalid:
505	inv_entry 1, BAD_IRQ
506ENDPROC(el1_irq_invalid)
507
508el1_fiq_invalid:
509	inv_entry 1, BAD_FIQ
510ENDPROC(el1_fiq_invalid)
511
512el1_error_invalid:
513	inv_entry 1, BAD_ERROR
514ENDPROC(el1_error_invalid)
515
516/*
517 * EL1 mode handlers.
518 */
519	.align	6
520el1_sync:
521	kernel_entry 1
522	mrs	x1, esr_el1			// read the syndrome register
523	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
524	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
525	b.eq	el1_da
526	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
527	b.eq	el1_ia
528	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
529	b.eq	el1_undef
530	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
531	b.eq	el1_sp_pc
532	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
533	b.eq	el1_sp_pc
534	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
535	b.eq	el1_undef
536	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
537	b.ge	el1_dbg
538	b	el1_inv
539
540el1_ia:
541	/*
542	 * Fall through to the Data abort case
543	 */
544el1_da:
545	/*
546	 * Data abort handling
547	 */
548	mrs	x3, far_el1
549	inherit_daif	pstate=x23, tmp=x2
550	clear_address_tag x0, x3
551	mov	x2, sp				// struct pt_regs
552	bl	do_mem_abort
553
554	kernel_exit 1
555el1_sp_pc:
556	/*
557	 * Stack or PC alignment exception handling
558	 */
559	mrs	x0, far_el1
560	inherit_daif	pstate=x23, tmp=x2
561	mov	x2, sp
562	bl	do_sp_pc_abort
563	ASM_BUG()
564el1_undef:
565	/*
566	 * Undefined instruction
567	 */
568	inherit_daif	pstate=x23, tmp=x2
569	mov	x0, sp
570	bl	do_undefinstr
571	ASM_BUG()
572el1_dbg:
573	/*
574	 * Debug exception handling
575	 */
576	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
577	cinc	x24, x24, eq			// set bit '0'
578	tbz	x24, #0, el1_inv		// EL1 only
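	/*
	 * Debug exception classes taken from the current EL have bit 0 of the
	 * EC set (0x31/0x33/0x35), except BRK64 (0x3c); the cinc above makes
	 * BRK64 odd as well, so a clear bit 0 means a lower-EL EC that should
	 * never reach this handler.
	 */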
579	mrs	x0, far_el1
580	mov	x2, sp				// struct pt_regs
581	bl	do_debug_exception
582	kernel_exit 1
583el1_inv:
584	// TODO: add support for undefined instructions in kernel mode
585	inherit_daif	pstate=x23, tmp=x2
586	mov	x0, sp
587	mov	x2, x1
588	mov	x1, #BAD_SYNC
589	bl	bad_mode
590	ASM_BUG()
591ENDPROC(el1_sync)
592
593	.align	6
594el1_irq:
595	kernel_entry 1
596	enable_da_f
597#ifdef CONFIG_TRACE_IRQFLAGS
598	bl	trace_hardirqs_off
599#endif
600
601	irq_handler
602
603#ifdef CONFIG_PREEMPT
604	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
605	cbnz	w24, 1f				// preempt count != 0
606	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
607	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
608	bl	el1_preempt
6091:
610#endif
611#ifdef CONFIG_TRACE_IRQFLAGS
612	bl	trace_hardirqs_on
613#endif
614	kernel_exit 1
615ENDPROC(el1_irq)
616
617#ifdef CONFIG_PREEMPT
618el1_preempt:
619	mov	x24, lr
6201:	bl	preempt_schedule_irq		// irq en/disable is done inside
621	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
622	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
623	ret	x24
624#endif
625
626/*
627 * EL0 mode handlers.
628 */
629	.align	6
630el0_sync:
631	kernel_entry 0
632	mrs	x25, esr_el1			// read the syndrome register
633	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
634	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
635	b.eq	el0_svc
636	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
637	b.eq	el0_da
638	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
639	b.eq	el0_ia
640	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
641	b.eq	el0_fpsimd_acc
642	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
643	b.eq	el0_sve_acc
644	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
645	b.eq	el0_fpsimd_exc
646	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
647	b.eq	el0_sys
648	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
649	b.eq	el0_sp_pc
650	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
651	b.eq	el0_sp_pc
652	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
653	b.eq	el0_undef
654	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
655	b.ge	el0_dbg
656	b	el0_inv
657
658#ifdef CONFIG_COMPAT
659	.align	6
660el0_sync_compat:
661	kernel_entry 0, 32
662	mrs	x25, esr_el1			// read the syndrome register
663	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
664	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
665	b.eq	el0_svc_compat
666	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
667	b.eq	el0_da
668	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
669	b.eq	el0_ia
670	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
671	b.eq	el0_fpsimd_acc
672	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
673	b.eq	el0_fpsimd_exc
674	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
675	b.eq	el0_sp_pc
676	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
677	b.eq	el0_undef
678	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
679	b.eq	el0_undef
680	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
681	b.eq	el0_undef
682	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
683	b.eq	el0_undef
684	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
685	b.eq	el0_undef
686	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
687	b.eq	el0_undef
688	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
689	b.ge	el0_dbg
690	b	el0_inv
691el0_svc_compat:
692	/*
693	 * AArch32 syscall handling
694	 */
695	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
696	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
697	mov	wscno, w7			// syscall number in w7 (r7)
698	mov     wsc_nr, #__NR_compat_syscalls
699	b	el0_svc_naked
700
701	.align	6
702el0_irq_compat:
703	kernel_entry 0, 32
704	b	el0_irq_naked
705
706el0_error_compat:
707	kernel_entry 0, 32
708	b	el0_error_naked
709#endif
710
711el0_da:
712	/*
713	 * Data abort handling
714	 */
715	mrs	x26, far_el1
716	enable_daif
717	ct_user_exit
718	clear_address_tag x0, x26
719	mov	x1, x25
720	mov	x2, sp
721	bl	do_mem_abort
722	b	ret_to_user
723el0_ia:
724	/*
725	 * Instruction abort handling
726	 */
727	mrs	x26, far_el1
728	enable_da_f
729#ifdef CONFIG_TRACE_IRQFLAGS
730	bl	trace_hardirqs_off
731#endif
732	ct_user_exit
733	mov	x0, x26
734	mov	x1, x25
735	mov	x2, sp
736	bl	do_el0_ia_bp_hardening
737	b	ret_to_user
738el0_fpsimd_acc:
739	/*
740	 * Floating Point or Advanced SIMD access
741	 */
742	enable_daif
743	ct_user_exit
744	mov	x0, x25
745	mov	x1, sp
746	bl	do_fpsimd_acc
747	b	ret_to_user
748el0_sve_acc:
749	/*
750	 * Scalable Vector Extension access
751	 */
752	enable_daif
753	ct_user_exit
754	mov	x0, x25
755	mov	x1, sp
756	bl	do_sve_acc
757	b	ret_to_user
758el0_fpsimd_exc:
759	/*
760	 * Floating Point, Advanced SIMD or SVE exception
761	 */
762	enable_daif
763	ct_user_exit
764	mov	x0, x25
765	mov	x1, sp
766	bl	do_fpsimd_exc
767	b	ret_to_user
768el0_sp_pc:
769	/*
770	 * Stack or PC alignment exception handling
771	 */
772	mrs	x26, far_el1
773	enable_daif
774	ct_user_exit
775	mov	x0, x26
776	mov	x1, x25
777	mov	x2, sp
778	bl	do_sp_pc_abort
779	b	ret_to_user
780el0_undef:
781	/*
782	 * Undefined instruction
783	 */
784	enable_daif
785	ct_user_exit
786	mov	x0, sp
787	bl	do_undefinstr
788	b	ret_to_user
789el0_sys:
790	/*
791	 * System instructions, for trapped cache maintenance instructions
792	 */
793	enable_daif
794	ct_user_exit
795	mov	x0, x25
796	mov	x1, sp
797	bl	do_sysinstr
798	b	ret_to_user
799el0_dbg:
800	/*
801	 * Debug exception handling
802	 */
803	tbnz	x24, #0, el0_inv		// EL0 only
804	mrs	x0, far_el1
805	mov	x1, x25
806	mov	x2, sp
807	bl	do_debug_exception
808	enable_daif
809	ct_user_exit
810	b	ret_to_user
811el0_inv:
812	enable_daif
813	ct_user_exit
814	mov	x0, sp
815	mov	x1, #BAD_SYNC
816	mov	x2, x25
817	bl	bad_el0_sync
818	b	ret_to_user
819ENDPROC(el0_sync)
820
821	.align	6
822el0_irq:
823	kernel_entry 0
824el0_irq_naked:
825	enable_da_f
826#ifdef CONFIG_TRACE_IRQFLAGS
827	bl	trace_hardirqs_off
828#endif
829
830	ct_user_exit
831	irq_handler
832
833#ifdef CONFIG_TRACE_IRQFLAGS
834	bl	trace_hardirqs_on
835#endif
836	b	ret_to_user
837ENDPROC(el0_irq)
838
839el1_error:
840	kernel_entry 1
841	mrs	x1, esr_el1
842	enable_dbg
843	mov	x0, sp
844	bl	do_serror
845	kernel_exit 1
846ENDPROC(el1_error)
847
848el0_error:
849	kernel_entry 0
850el0_error_naked:
851	mrs	x1, esr_el1
852	enable_dbg
853	mov	x0, sp
854	bl	do_serror
855	enable_daif
856	ct_user_exit
857	b	ret_to_user
858ENDPROC(el0_error)
859
860
861/*
862 * This is the fast syscall return path.  We do as little as possible here,
863 * and this includes saving x0 back into the kernel stack.
864 */
865ret_fast_syscall:
866	disable_daif
867	str	x0, [sp, #S_X0]			// returned x0
868	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
869	and	x2, x1, #_TIF_SYSCALL_WORK
870	cbnz	x2, ret_fast_syscall_trace
871	and	x2, x1, #_TIF_WORK_MASK
872	cbnz	x2, work_pending
873	enable_step_tsk x1, x2
874	kernel_exit 0
875ret_fast_syscall_trace:
876	enable_daif
877	b	__sys_trace_return_skipped	// we already saved x0
878
879/*
880 * Ok, we need to do extra processing; enter the slow path.
881 */
882work_pending:
883	mov	x0, sp				// 'regs'
884	bl	do_notify_resume
885#ifdef CONFIG_TRACE_IRQFLAGS
886	bl	trace_hardirqs_on		// enabled while in userspace
887#endif
888	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
889	b	finish_ret_to_user
890/*
891 * "slow" syscall return path.
892 */
893ret_to_user:
894	disable_daif
895	ldr	x1, [tsk, #TSK_TI_FLAGS]
896	and	x2, x1, #_TIF_WORK_MASK
897	cbnz	x2, work_pending
898finish_ret_to_user:
899	enable_step_tsk x1, x2
900	kernel_exit 0
901ENDPROC(ret_to_user)
902
903/*
904 * SVC handler.
905 */
906	.align	6
907el0_svc:
908	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
909	adrp	stbl, sys_call_table		// load syscall table pointer
910	mov	wscno, w8			// syscall number in w8
911	mov	wsc_nr, #__NR_syscalls
912
913#ifdef CONFIG_ARM64_SVE
914alternative_if_not ARM64_SVE
915	b	el0_svc_naked
916alternative_else_nop_endif
917	tbz	x16, #TIF_SVE, el0_svc_naked	// Skip unless TIF_SVE set:
918	bic	x16, x16, #_TIF_SVE		// discard SVE state
919	str	x16, [tsk, #TSK_TI_FLAGS]
920
921	/*
922	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
923	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
924	 * happens if a context switch or kernel_neon_begin() or context
925	 * modification (sigreturn, ptrace) intervenes.
926	 * So, ensure that CPACR_EL1 is already correct for the fast-path case:
927	 */
928	mrs	x9, cpacr_el1
929	bic	x9, x9, #CPACR_EL1_ZEN_EL0EN	// disable SVE for el0
930	msr	cpacr_el1, x9			// synchronised by eret to el0
931#endif
932
933el0_svc_naked:					// compat entry point
934	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
935	enable_daif
936	ct_user_exit 1
937
938	tst	x16, #_TIF_SYSCALL_WORK		// check for syscall hooks
939	b.ne	__sys_trace
940	cmp     wscno, wsc_nr			// check upper syscall limit
941	b.hs	ni_sys
942	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
943	blr	x16				// call sys_* routine
944	b	ret_fast_syscall
945ni_sys:
946	mov	x0, sp
947	bl	do_ni_syscall
948	b	ret_fast_syscall
949ENDPROC(el0_svc)
950
951	/*
952	 * This is the really slow path.  We're going to be doing context
953	 * switches, and waiting for our parent to respond.
954	 */
955__sys_trace:
956	cmp     wscno, #NO_SYSCALL		// user-issued syscall(-1)?
957	b.ne	1f
958	mov	x0, #-ENOSYS			// set default errno if so
959	str	x0, [sp, #S_X0]
9601:	mov	x0, sp
961	bl	syscall_trace_enter
962	cmp	w0, #NO_SYSCALL			// skip the syscall?
963	b.eq	__sys_trace_return_skipped
964	mov	wscno, w0			// syscall number (possibly new)
965	mov	x1, sp				// pointer to regs
966	cmp	wscno, wsc_nr			// check upper syscall limit
967	b.hs	__ni_sys_trace
968	ldp	x0, x1, [sp]			// restore the syscall args
969	ldp	x2, x3, [sp, #S_X2]
970	ldp	x4, x5, [sp, #S_X4]
971	ldp	x6, x7, [sp, #S_X6]
972	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
973	blr	x16				// call sys_* routine
974
975__sys_trace_return:
976	str	x0, [sp, #S_X0]			// save returned x0
977__sys_trace_return_skipped:
978	mov	x0, sp
979	bl	syscall_trace_exit
980	b	ret_to_user
981
982__ni_sys_trace:
983	mov	x0, sp
984	bl	do_ni_syscall
985	b	__sys_trace_return
986
987	.popsection				// .entry.text
988
989#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
990/*
991 * Exception vectors trampoline.
992 */
993	.pushsection ".entry.tramp.text", "ax"
994
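/*
 * The trampoline page tables (tramp_pg_dir) sit immediately before
 * reserved_ttbr0/swapper_pg_dir in the kernel image (see vmlinux.lds.S), so
 * the kernel can be mapped or unmapped by adding or subtracting a fixed
 * offset to TTBR1_EL1 while flipping the ASID between its user and kernel
 * values.
 */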
995	.macro tramp_map_kernel, tmp
996	mrs	\tmp, ttbr1_el1
997	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
998	bic	\tmp, \tmp, #USER_ASID_FLAG
999	msr	ttbr1_el1, \tmp
1000#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
1001alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
1002	/* ASID already in \tmp[63:48] */
1003	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
1004	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
1005	/* 2MB boundary containing the vectors, so we nobble the walk cache */
1006	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
1007	isb
1008	tlbi	vae1, \tmp
1009	dsb	nsh
1010alternative_else_nop_endif
1011#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
1012	.endm
1013
1014	.macro tramp_unmap_kernel, tmp
1015	mrs	\tmp, ttbr1_el1
1016	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
1017	orr	\tmp, \tmp, #USER_ASID_FLAG
1018	msr	ttbr1_el1, \tmp
1019	/*
1020	 * We avoid running the post_ttbr_update_workaround here because the
1021	 * user and kernel ASIDs don't have conflicting mappings, so any
1022	 * "blessing" as described in:
1023	 *
1024	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
1025	 *
1026	 * will not hurt correctness. Whilst this may partially defeat the
1027	 * point of using split ASIDs in the first place, it avoids
1028	 * the hit of invalidating the entire I-cache on every return to
1029	 * userspace.
1030	 */
1031	.endm
1032
1033	.macro tramp_ventry, regsize = 64
1034	.align	7
10351:
1036	.if	\regsize == 64
1037	msr	tpidrro_el0, x30	// Restored in kernel_ventry
1038	.endif
1039	/*
1040	 * Defend against branch aliasing attacks by pushing a dummy
1041	 * entry onto the return stack and using a RET instruction to
1042	 * enter the full-fat kernel vectors.
1043	 */
1044	bl	2f
1045	b	.
10462:
1047	tramp_map_kernel	x30
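	/*
	 * With KASLR the virtual address of the real vectors would leak the
	 * kernel's random offset if it were stored as a literal here, so it
	 * is loaded instead from the data page mapped directly after the
	 * trampoline vectors (__entry_tramp_data_start).
	 */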
1048#ifdef CONFIG_RANDOMIZE_BASE
1049	adr	x30, tramp_vectors + PAGE_SIZE
1050alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1051	ldr	x30, [x30]
1052#else
1053	ldr	x30, =vectors
1054#endif
1055	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
1056	msr	vbar_el1, x30
1057	add	x30, x30, #(1b - tramp_vectors)
1058	isb
1059	ret
1060	.endm
1061
1062	.macro tramp_exit, regsize = 64
1063	adr	x30, tramp_vectors
1064	msr	vbar_el1, x30
1065	tramp_unmap_kernel	x30
1066	.if	\regsize == 64
1067	mrs	x30, far_el1
1068	.endif
1069	eret
1070	.endm
1071
1072	.align	11
1073ENTRY(tramp_vectors)
1074	.space	0x400
1075
1076	tramp_ventry
1077	tramp_ventry
1078	tramp_ventry
1079	tramp_ventry
1080
1081	tramp_ventry	32
1082	tramp_ventry	32
1083	tramp_ventry	32
1084	tramp_ventry	32
1085END(tramp_vectors)
1086
1087ENTRY(tramp_exit_native)
1088	tramp_exit
1089END(tramp_exit_native)
1090
1091ENTRY(tramp_exit_compat)
1092	tramp_exit	32
1093END(tramp_exit_compat)
1094
1095	.ltorg
1096	.popsection				// .entry.tramp.text
1097#ifdef CONFIG_RANDOMIZE_BASE
1098	.pushsection ".rodata", "a"
1099	.align PAGE_SHIFT
1100	.globl	__entry_tramp_data_start
1101__entry_tramp_data_start:
1102	.quad	vectors
1103	.popsection				// .rodata
1104#endif /* CONFIG_RANDOMIZE_BASE */
1105#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1106
1107/*
1108 * Special system call wrappers.
1109 */
1110ENTRY(sys_rt_sigreturn_wrapper)
1111	mov	x0, sp
1112	b	sys_rt_sigreturn
1113ENDPROC(sys_rt_sigreturn_wrapper)
1114
1115/*
1116 * Register switch for AArch64. The callee-saved registers need to be saved
1117 * and restored. On entry:
1118 *   x0 = previous task_struct (must be preserved across the switch)
1119 *   x1 = next task_struct
1120 * Previous and next are guaranteed not to be the same.
1121 *
1122 */
1123ENTRY(cpu_switch_to)
1124	mov	x10, #THREAD_CPU_CONTEXT
1125	add	x8, x0, x10
1126	mov	x9, sp
1127	stp	x19, x20, [x8], #16		// store callee-saved registers
1128	stp	x21, x22, [x8], #16
1129	stp	x23, x24, [x8], #16
1130	stp	x25, x26, [x8], #16
1131	stp	x27, x28, [x8], #16
1132	stp	x29, x9, [x8], #16
1133	str	lr, [x8]
1134	add	x8, x1, x10
1135	ldp	x19, x20, [x8], #16		// restore callee-saved registers
1136	ldp	x21, x22, [x8], #16
1137	ldp	x23, x24, [x8], #16
1138	ldp	x25, x26, [x8], #16
1139	ldp	x27, x28, [x8], #16
1140	ldp	x29, x9, [x8], #16
1141	ldr	lr, [x8]
1142	mov	sp, x9
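	// sp_el0 holds the current task_struct pointer while running in the
	// kernel (see get_thread_info), so writing x1 here completes the
	// switch of 'current'.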
1143	msr	sp_el0, x1
1144	ret
1145ENDPROC(cpu_switch_to)
1146NOKPROBE(cpu_switch_to)
1147
1148/*
1149 * This is how we return from a fork.
1150 */
1151ENTRY(ret_from_fork)
1152	bl	schedule_tail
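	// copy_thread() places the kernel-thread entry point in x19 and its
	// argument in x20; for user tasks x19 is zero and we fall through to
	// the normal return-to-user path.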
1153	cbz	x19, 1f				// not a kernel thread
1154	mov	x0, x20
1155	blr	x19
11561:	get_thread_info tsk
1157	b	ret_to_user
1158ENDPROC(ret_from_fork)
1159NOKPROBE(ret_from_fork)
1160
1161#ifdef CONFIG_ARM_SDE_INTERFACE
1162
1163#include <asm/sdei.h>
1164#include <uapi/linux/arm_sdei.h>
1165
1166.macro sdei_handler_exit exit_mode
1167	/* On success, this call never returns... */
1168	cmp	\exit_mode, #SDEI_EXIT_SMC
1169	b.ne	99f
1170	smc	#0
1171	b	.
117299:	hvc	#0
1173	b	.
1174.endm
1175
1176#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1177/*
1178 * The regular SDEI entry point may have been unmapped along with the rest of
1179 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1180 * argument accessible.
1181 *
1182 * This clobbers x4; __sdei_handler() will restore it from firmware's
1183 * copy.
1184 */
1185.ltorg
1186.pushsection ".entry.tramp.text", "ax"
1187ENTRY(__sdei_asm_entry_trampoline)
1188	mrs	x4, ttbr1_el1
1189	tbz	x4, #USER_ASID_BIT, 1f
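	/*
	 * A clear USER_ASID_BIT means we interrupted a context that already
	 * has the kernel mapped: skip tramp_map_kernel and store the non-zero
	 * TTBR1 value below so that the exit trampoline knows not to unmap it.
	 */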
1190
1191	tramp_map_kernel tmp=x4
1192	isb
1193	mov	x4, xzr
1194
1195	/*
1196	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1197	 * the kernel on exit.
1198	 */
11991:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1200
1201#ifdef CONFIG_RANDOMIZE_BASE
1202	adr	x4, tramp_vectors + PAGE_SIZE
1203	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1204	ldr	x4, [x4]
1205#else
1206	ldr	x4, =__sdei_asm_handler
1207#endif
1208	br	x4
1209ENDPROC(__sdei_asm_entry_trampoline)
1210NOKPROBE(__sdei_asm_entry_trampoline)
1211
1212/*
1213 * Make the exit call and restore the original ttbr1_el1
1214 *
1215 * x0 & x1: setup for the exit API call
1216 * x2: exit_mode
1217 * x4: struct sdei_registered_event argument from registration time.
1218 */
1219ENTRY(__sdei_asm_exit_trampoline)
1220	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1221	cbnz	x4, 1f
1222
1223	tramp_unmap_kernel	tmp=x4
1224
12251:	sdei_handler_exit exit_mode=x2
1226ENDPROC(__sdei_asm_exit_trampoline)
1227NOKPROBE(__sdei_asm_exit_trampoline)
1228	.ltorg
1229.popsection		// .entry.tramp.text
1230#ifdef CONFIG_RANDOMIZE_BASE
1231.pushsection ".rodata", "a"
1232__sdei_asm_trampoline_next_handler:
1233	.quad	__sdei_asm_handler
1234.popsection		// .rodata
1235#endif /* CONFIG_RANDOMIZE_BASE */
1236#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1237
1238/*
1239 * Software Delegated Exception entry point.
1240 *
1241 * x0: Event number
1242 * x1: struct sdei_registered_event argument from registration time.
1243 * x2: interrupted PC
1244 * x3: interrupted PSTATE
1245 * x4: maybe clobbered by the trampoline
1246 *
1247 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
1248 * follow the SMC calling convention (SMCCC). We save (or retrieve) all the
1249 * registers as the handler may want them.
1250 */
1251ENTRY(__sdei_asm_handler)
1252	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1253	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1254	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1255	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1256	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1257	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1258	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1259	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1260	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1261	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1262	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1263	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1264	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1265	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1266	mov	x4, sp
1267	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1268
1269	mov	x19, x1
1270
1271#ifdef CONFIG_VMAP_STACK
1272	/*
1273	 * entry.S may have been using sp as a scratch register; find whether
1274	 * this is a normal or critical event and switch to the appropriate
1275	 * stack for this CPU.
1276	 */
1277	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1278	cbnz	w4, 1f
1279	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1280	b	2f
12811:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
12822:	mov	x6, #SDEI_STACK_SIZE
1283	add	x5, x5, x6
1284	mov	sp, x5
1285#endif
1286
1287	/*
1288	 * We may have interrupted userspace, or a guest, or exit-from or
1289	 * return-to either of these. We can't trust sp_el0; restore it.
1290	 */
1291	mrs	x28, sp_el0
1292	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1293	msr	sp_el0, x0
1294
1295	/* If we interrupted the kernel, point to the previous stack/frame. */
1296	and     x0, x3, #0xc
1297	mrs     x1, CurrentEL
1298	cmp     x0, x1
1299	csel	x29, x29, xzr, eq	// fp, or zero
1300	csel	x4, x2, xzr, eq		// elr, or zero
1301
1302	stp	x29, x4, [sp, #-16]!
1303	mov	x29, sp
1304
1305	add	x0, x19, #SDEI_EVENT_INTREGS
1306	mov	x1, x19
1307	bl	__sdei_handler
1308
1309	msr	sp_el0, x28
1310	/* restore regs >x17 that we clobbered */
1311	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1312	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1313	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1314	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1315	mov	sp, x1
1316
1317	mov	x1, x0			// address to complete_and_resume
1318	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1319	cmp	x0, #1
1320	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1321	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1322	csel	x0, x2, x3, ls
1323
1324	ldr_l	x2, sdei_exit_mode
1325
1326alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1327	sdei_handler_exit exit_mode=x2
1328alternative_else_nop_endif
1329
1330#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1331	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1332	br	x5
1333#endif
1334ENDPROC(__sdei_asm_handler)
1335NOKPROBE(__sdei_asm_handler)
1336#endif /* CONFIG_ARM_SDE_INTERFACE */
1337