xref: /linux/arch/arm64/kernel/entry.S (revision 962fad301c33dec69324dc2d9320fd84a119a24c)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Low-level exception handling code
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
7 *		Will Deacon <will.deacon@arm.com>
8 */
9
10#include <linux/arm-smccc.h>
11#include <linux/init.h>
12#include <linux/linkage.h>
13
14#include <asm/alternative.h>
15#include <asm/assembler.h>
16#include <asm/asm-offsets.h>
17#include <asm/asm_pointer_auth.h>
18#include <asm/bug.h>
19#include <asm/cpufeature.h>
20#include <asm/errno.h>
21#include <asm/esr.h>
22#include <asm/irq.h>
23#include <asm/memory.h>
24#include <asm/mmu.h>
25#include <asm/processor.h>
26#include <asm/ptrace.h>
27#include <asm/scs.h>
28#include <asm/thread_info.h>
29#include <asm/asm-uaccess.h>
30#include <asm/unistd.h>
31
32/*
33 * Context tracking subsystem.  Used to instrument transitions
34 * between user and kernel mode.
35 */
/*
 * Context tracking: tell the kernel we have just left user mode.
 * The _irqoff suffix indicates callers invoke this with IRQs masked.
 * Clobbers x30 plus AAPCS caller-saved registers via the C call when
 * CONFIG_CONTEXT_TRACKING is enabled; a no-op otherwise.
 */
36	.macro ct_user_exit_irqoff
37#ifdef CONFIG_CONTEXT_TRACKING
38	bl	enter_from_user_mode
39#endif
40	.endm
41
/*
 * Context tracking: tell the kernel we are about to return to user mode.
 * Clobbers x30 plus AAPCS caller-saved registers via the C call when
 * CONFIG_CONTEXT_TRACKING is enabled; a no-op otherwise.
 */
42	.macro ct_user_enter
43#ifdef CONFIG_CONTEXT_TRACKING
44	bl	context_tracking_user_enter
45#endif
46	.endm
47
/*
 * Zero x0-x29 so no user-controlled values survive into the kernel.
 * x30 is deliberately not cleared here.
 */
48	.macro	clear_gp_regs
49	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
50	mov	x\n, xzr
51	.endr
52	.endm
53
/*
 * Bad Abort numbers
 * -----------------
 * Reason codes passed (in x1) to bad_mode() by inv_entry for exceptions
 * arriving at a vector we never expect to be used.
 */
58#define BAD_SYNC	0
59#define BAD_IRQ		1
60#define BAD_FIQ		2
61#define BAD_ERROR	3
62
/*
 * Build one 128-byte exception vector entry (.align 7) that branches to
 * el\el\()_\label. \el is the EL the exception was taken from (0 or 1);
 * \regsize is the interrupted context's register width (64 or 32).
 */
63	.macro kernel_ventry, el, label, regsize = 64
64	.align 7
65#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
66	.if	\el == 0
67alternative_if ARM64_UNMAP_KERNEL_AT_EL0
68	.if	\regsize == 64
	/* x30 was stashed in tpidrro_el0 by tramp_ventry; recover it */
69	mrs	x30, tpidrro_el0
70	msr	tpidrro_el0, xzr
71	.else
72	mov	x30, xzr
73	.endif
74alternative_else_nop_endif
75	.endif
76#endif
77
78	sub	sp, sp, #S_FRAME_SIZE
79#ifdef CONFIG_VMAP_STACK
80	/*
81	 * Test whether the SP has overflowed, without corrupting a GPR.
82	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
83	 * should always be zero.
84	 */
85	add	sp, sp, x0			// sp' = sp + x0
86	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
87	tbnz	x0, #THREAD_SHIFT, 0f
88	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
89	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
90	b	el\()\el\()_\label
91
920:
93	/*
94	 * Either we've just detected an overflow, or we've taken an exception
95	 * while on the overflow stack. Either way, we won't return to
96	 * userspace, and can clobber EL0 registers to free up GPRs.
97	 */
98
99	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
100	msr	tpidr_el0, x0
101
102	/* Recover the original x0 value and stash it in tpidrro_el0 */
103	sub	x0, sp, x0
104	msr	tpidrro_el0, x0
105
106	/* Switch to the overflow stack */
107	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
108
109	/*
110	 * Check whether we were already on the overflow stack. This may happen
111	 * after panic() re-enables interrupts.
112	 */
113	mrs	x0, tpidr_el0			// sp of interrupted context
114	sub	x0, sp, x0			// delta with top of overflow stack
115	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
116	b.ne	__bad_stack			// no? -> bad stack pointer
117
118	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
119	sub	sp, sp, x0
120	mrs	x0, tpidrro_el0
121#endif
122	b	el\()\el\()_\label
123	.endm
124
/*
 * Compute the trampoline-mapping (TRAMP_VALIAS) alias of \sym, i.e. the
 * address at which \sym is visible while only the KPTI trampoline page
 * is mapped: \sym's offset within .entry.tramp.text plus the fixmap base.
 */
125	.macro tramp_alias, dst, sym
126	mov_q	\dst, TRAMP_VALIAS
127	add	\dst, \dst, #(\sym - .entry.tramp.text)
128	.endm
129
130	/*
131	 * This macro corrupts x0-x3. It is the caller's duty  to save/restore
132	 * them if required.
133	 */
134	.macro	apply_ssbd, state, tmp1, tmp2
135#ifdef CONFIG_ARM64_SSBD
136alternative_cb	arm64_enable_wa2_handling
137	b	.L__asm_ssbd_skip\@
138alternative_cb_end
139	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
140	cbz	\tmp2,	.L__asm_ssbd_skip\@
141	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
142	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
143	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
144	mov	w1, #\state
145alternative_cb	arm64_update_smccc_conduit
146	nop					// Patched to SMC/HVC #0
147alternative_cb_end
148.L__asm_ssbd_skip\@:
149#endif
150	.endm
151
/*
 * kernel_entry: save the interrupted context into a struct pt_regs frame
 * on the stack. kernel_ventry has already made room with
 * "sub sp, sp, #S_FRAME_SIZE". \el is the source EL; \regsize == 32 for
 * AArch32 EL0 contexts. See the register summary at the end of the macro.
 */
152	.macro	kernel_entry, el, regsize = 64
153	.if	\regsize == 32
154	mov	w0, w0				// zero upper 32 bits of x0
155	.endif
156	stp	x0, x1, [sp, #16 * 0]
157	stp	x2, x3, [sp, #16 * 1]
158	stp	x4, x5, [sp, #16 * 2]
159	stp	x6, x7, [sp, #16 * 3]
160	stp	x8, x9, [sp, #16 * 4]
161	stp	x10, x11, [sp, #16 * 5]
162	stp	x12, x13, [sp, #16 * 6]
163	stp	x14, x15, [sp, #16 * 7]
164	stp	x16, x17, [sp, #16 * 8]
165	stp	x18, x19, [sp, #16 * 9]
166	stp	x20, x21, [sp, #16 * 10]
167	stp	x22, x23, [sp, #16 * 11]
168	stp	x24, x25, [sp, #16 * 12]
169	stp	x26, x27, [sp, #16 * 13]
170	stp	x28, x29, [sp, #16 * 14]
171
172	.if	\el == 0
173	.if	\regsize == 32
174	/*
175	 * If we're returning from a 32-bit task on a system affected by
176	 * 1418040 then re-enable userspace access to the virtual counter.
177	 */
178#ifdef CONFIG_ARM64_ERRATUM_1418040
179alternative_if ARM64_WORKAROUND_1418040
180	mrs	x0, cntkctl_el1
181	orr	x0, x0, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
182	msr	cntkctl_el1, x0
183alternative_else_nop_endif
184#endif
185	.endif
	/* Coming from EL0: scrub GPRs and switch sp_el0 to hold 'current' */
186	clear_gp_regs
187	mrs	x21, sp_el0
188	ldr_this_cpu	tsk, __entry_task, x20
189	msr	sp_el0, tsk
190
191	/*
192	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
193	 * when scheduling.
194	 */
195	ldr	x19, [tsk, #TSK_TI_FLAGS]
196	disable_step_tsk x19, x20
197
198	apply_ssbd 1, x22, x23
199
200	ptrauth_keys_install_kernel tsk, x20, x22, x23
201
202	scs_load tsk, x20
203	.else
	/* Coming from EL1: the aborted SP is just above this frame */
204	add	x21, sp, #S_FRAME_SIZE
205	get_current_task tsk
206	/* Save the task's original addr_limit and set USER_DS */
207	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
208	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
209	mov	x20, #USER_DS
210	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
211	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
212	.endif /* \el == 0 */
213	mrs	x22, elr_el1
214	mrs	x23, spsr_el1
215	stp	lr, x21, [sp, #S_LR]
216
217	/*
218	 * In order to be able to dump the contents of struct pt_regs at the
219	 * time the exception was taken (in case we attempt to walk the call
220	 * stack later), chain it together with the stack frames.
221	 */
222	.if \el == 0
223	stp	xzr, xzr, [sp, #S_STACKFRAME]
224	.else
225	stp	x29, x22, [sp, #S_STACKFRAME]
226	.endif
227	add	x29, sp, #S_STACKFRAME
228
229#ifdef CONFIG_ARM64_SW_TTBR0_PAN
230alternative_if_not ARM64_HAS_PAN
231	bl	__swpan_entry_el\el
232alternative_else_nop_endif
233#endif
234
235	stp	x22, x23, [sp, #S_PC]
236
237	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
238	.if	\el == 0
239	mov	w21, #NO_SYSCALL
240	str	w21, [sp, #S_SYSCALLNO]
241	.endif
242
243	/* Save pmr */
244alternative_if ARM64_HAS_IRQ_PRIO_MASKING
245	mrs_s	x20, SYS_ICC_PMR_EL1
246	str	x20, [sp, #S_PMR_SAVE]
247alternative_else_nop_endif
248
249	/*
250	 * Registers that may be useful after this macro is invoked:
251	 *
252	 * x20 - ICC_PMR_EL1
253	 * x21 - aborted SP
254	 * x22 - aborted PC
255	 * x23 - aborted PSTATE
256	*/
257	.endm
258
/*
 * kernel_exit: unwind the pt_regs frame built by kernel_entry and return
 * from the exception via ERET — directly for EL1 returns, or through the
 * KPTI trampoline vectors (tramp_exit_*) for EL0 returns when
 * ARM64_UNMAP_KERNEL_AT_EL0 is enabled.
 */
259	.macro	kernel_exit, el
260	.if	\el != 0
261	disable_daif
262
263	/* Restore the task's original addr_limit. */
264	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
265	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
266
267	/* No need to restore UAO, it will be restored from SPSR_EL1 */
268	.endif
269
270	/* Restore pmr */
271alternative_if ARM64_HAS_IRQ_PRIO_MASKING
272	ldr	x20, [sp, #S_PMR_SAVE]
273	msr_s	SYS_ICC_PMR_EL1, x20
274	mrs_s	x21, SYS_ICC_CTLR_EL1
275	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
276	dsb	sy				// Ensure priority change is seen by redistributor
277.L__skip_pmr_sync\@:
278alternative_else_nop_endif
279
280	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
281	.if	\el == 0
282	ct_user_enter
283	.endif
284
285#ifdef CONFIG_ARM64_SW_TTBR0_PAN
286alternative_if_not ARM64_HAS_PAN
287	bl	__swpan_exit_el\el
288alternative_else_nop_endif
289#endif
290
291	.if	\el == 0
292	ldr	x23, [sp, #S_SP]		// load return stack pointer
293	msr	sp_el0, x23
294	tst	x22, #PSR_MODE32_BIT		// native task?
295	b.eq	3f
296
297#ifdef CONFIG_ARM64_ERRATUM_1418040
298alternative_if ARM64_WORKAROUND_1418040
299	mrs	x0, cntkctl_el1
300	bic	x0, x0, #2			// ARCH_TIMER_USR_VCT_ACCESS_EN
301	msr	cntkctl_el1, x0
302alternative_else_nop_endif
303#endif
304
305#ifdef CONFIG_ARM64_ERRATUM_845719
306alternative_if ARM64_WORKAROUND_845719
307#ifdef CONFIG_PID_IN_CONTEXTIDR
308	mrs	x29, contextidr_el1
309	msr	contextidr_el1, x29
310#else
311	msr contextidr_el1, xzr
312#endif
313alternative_else_nop_endif
314#endif
3153:
316	scs_save tsk, x0
317
318	/* No kernel C function calls after this as user keys are set. */
319	ptrauth_keys_install_user tsk, x0, x1, x2
320
321	apply_ssbd 0, x0, x1
322	.endif
323
324	msr	elr_el1, x21			// set up the return data
325	msr	spsr_el1, x22
326	ldp	x0, x1, [sp, #16 * 0]
327	ldp	x2, x3, [sp, #16 * 1]
328	ldp	x4, x5, [sp, #16 * 2]
329	ldp	x6, x7, [sp, #16 * 3]
330	ldp	x8, x9, [sp, #16 * 4]
331	ldp	x10, x11, [sp, #16 * 5]
332	ldp	x12, x13, [sp, #16 * 6]
333	ldp	x14, x15, [sp, #16 * 7]
334	ldp	x16, x17, [sp, #16 * 8]
335	ldp	x18, x19, [sp, #16 * 9]
336	ldp	x20, x21, [sp, #16 * 10]
337	ldp	x22, x23, [sp, #16 * 11]
338	ldp	x24, x25, [sp, #16 * 12]
339	ldp	x26, x27, [sp, #16 * 13]
340	ldp	x28, x29, [sp, #16 * 14]
341	ldr	lr, [sp, #S_LR]
342	add	sp, sp, #S_FRAME_SIZE		// restore sp
343
344	.if	\el == 0
	/* ERET directly when KPTI is off; otherwise exit via the trampoline */
345alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
346#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	/* NZCV is still set by "tst x22, #PSR_MODE32_BIT" above */
347	bne	4f
348	msr	far_el1, x30			// stash lr; tramp_exit reloads it from far_el1
349	tramp_alias	x30, tramp_exit_native
350	br	x30
3514:
352	tramp_alias	x30, tramp_exit_compat
353	br	x30
354#endif
355	.else
356	eret
357	.endif
358	sb
359	.endm
360
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 *
	 * Called from kernel_entry with x23 = saved SPSR; x21 is scratch.
	 */
370SYM_CODE_START_LOCAL(__swpan_entry_el1)
371	mrs	x21, ttbr0_el1
372	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
373	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
374	b.eq	1f				// TTBR0 access already disabled
375	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
376SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
377	__uaccess_ttbr0_disable x21
3781:	ret
379SYM_CODE_END(__swpan_entry_el1)
380
381	/*
382	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
383	 * PAN bit checking.
384	 */
385SYM_CODE_START_LOCAL(__swpan_exit_el1)
386	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
387	__uaccess_ttbr0_enable x0, x1
3881:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
389	ret
390SYM_CODE_END(__swpan_exit_el1)
391
/* Re-enable TTBR0_EL1 access on the return-to-EL0 path (tail-calls out). */
392SYM_CODE_START_LOCAL(__swpan_exit_el0)
393	__uaccess_ttbr0_enable x0, x1
394	/*
395	 * Enable errata workarounds only if returning to user. The only
396	 * workaround currently required for TTBR0_EL1 changes are for the
397	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
398	 * corruption).
399	 */
400	b	post_ttbr_update_workaround
401SYM_CODE_END(__swpan_exit_el0)
#endif
403
/*
 * Switch to this CPU's IRQ stack (and IRQ shadow call stack) unless we
 * are already off the task stack. Preserves the old sp in x19 (and
 * scs_sp in x24) for irq_stack_exit.
 */
404	.macro	irq_stack_entry
405	mov	x19, sp			// preserve the original sp
406#ifdef CONFIG_SHADOW_CALL_STACK
407	mov	x24, scs_sp		// preserve the original shadow stack
408#endif
409
410	/*
411	 * Compare sp with the base of the task stack.
412	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
413	 * and should switch to the irq stack.
414	 */
415	ldr	x25, [tsk, TSK_STACK]
416	eor	x25, x25, x19
417	and	x25, x25, #~(THREAD_SIZE - 1)
418	cbnz	x25, 9998f		// not on the task stack: keep current sp
419
420	ldr_this_cpu x25, irq_stack_ptr, x26
421	mov	x26, #IRQ_STACK_SIZE
422	add	x26, x25, x26
423
424	/* switch to the irq stack */
425	mov	sp, x26
426
427#ifdef CONFIG_SHADOW_CALL_STACK
428	/* also switch to the irq shadow stack */
429	adr_this_cpu scs_sp, irq_shadow_call_stack, x26
430#endif
431
4329998:
433	.endm
434
435	/*
436	 * The callee-saved regs (x19-x29) should be preserved between
437	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
438	 * uses x20-x23 to store data for later use.
439	 */
440	.macro	irq_stack_exit
441	mov	sp, x19
442#ifdef CONFIG_SHADOW_CALL_STACK
443	mov	scs_sp, x24
444#endif
445	.endm
446
/* GPRs used by entry code */
/* x28 holds the current task throughout entry code (TSK_* offsets apply) */
448tsk	.req	x28		// current thread_info
449
450/*
451 * Interrupt handling.
452 */
453	.macro	irq_handler
454	ldr_l	x1, handle_arch_irq
455	mov	x0, sp
456	irq_stack_entry
457	blr	x1
458	irq_stack_exit
459	.endm
460
#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * Set res to 0 if irqs were unmasked in interrupted context.
	 * Otherwise set res to non-0 value.
	 * Without priority masking the result is always 0 (mov \res, xzr).
	 */
466	.macro	test_irqs_unmasked res:req, pmr:req
467alternative_if ARM64_HAS_IRQ_PRIO_MASKING
468	sub	\res, \pmr, #GIC_PRIO_IRQON
469alternative_else
470	mov	\res, xzr
471alternative_endif
472	.endm
#endif
474
/*
 * Pseudo-NMI: set PMR to "PSR.I set + IRQs on" for a kernel entry path,
 * so regular IRQs stay masked by priority. No-op without priority masking.
 */
475	.macro	gic_prio_kentry_setup, tmp:req
476#ifdef CONFIG_ARM64_PSEUDO_NMI
477	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
478	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
479	msr_s	SYS_ICC_PMR_EL1, \tmp
480	alternative_else_nop_endif
481#endif
	.endm
483
/*
 * Pseudo-NMI: OR GIC_PRIO_PSR_I_SET into the saved PMR (\pmr, from
 * kernel_entry) and write it back, masking regular IRQs during handling.
 * No-op without priority masking.
 */
484	.macro	gic_prio_irq_setup, pmr:req, tmp:req
485#ifdef CONFIG_ARM64_PSEUDO_NMI
486	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
487	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
488	msr_s	SYS_ICC_PMR_EL1, \tmp
489	alternative_else_nop_endif
#endif
	.endm
492
493	.text
494
495/*
496 * Exception vectors.
497 */
498	.pushsection ".entry.text", "ax"
499
/*
 * The 2KiB-aligned vector table: 16 entries of 128 bytes each
 * (kernel_ventry pads to .align 7), in the architectural order
 * {Sync, IRQ, FIQ, SError} x {EL1t, EL1h, EL0 64-bit, EL0 32-bit}.
 */
500	.align	11
501SYM_CODE_START(vectors)
502	kernel_ventry	1, sync_invalid			// Synchronous EL1t
503	kernel_ventry	1, irq_invalid			// IRQ EL1t
504	kernel_ventry	1, fiq_invalid			// FIQ EL1t
505	kernel_ventry	1, error_invalid		// Error EL1t
506
507	kernel_ventry	1, sync				// Synchronous EL1h
508	kernel_ventry	1, irq				// IRQ EL1h
509	kernel_ventry	1, fiq_invalid			// FIQ EL1h
510	kernel_ventry	1, error			// Error EL1h
511
512	kernel_ventry	0, sync				// Synchronous 64-bit EL0
513	kernel_ventry	0, irq				// IRQ 64-bit EL0
514	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
515	kernel_ventry	0, error			// Error 64-bit EL0
516
517#ifdef CONFIG_COMPAT
518	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
519	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
520	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
521	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
523	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
524	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
525	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
526	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
528SYM_CODE_END(vectors)
529
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler. The interrupted SP and x0 were left in tpidr_el0 and
	 * tpidrro_el0 respectively by kernel_ventry.
	 */
536__bad_stack:
537	/* Restore the original x0 value */
538	mrs	x0, tpidrro_el0
539
540	/*
541	 * Store the original GPRs to the new stack. The orginal SP (minus
542	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
543	 */
544	sub	sp, sp, #S_FRAME_SIZE
545	kernel_entry 1
546	mrs	x0, tpidr_el0
547	add	x0, x0, #S_FRAME_SIZE
548	str	x0, [sp, #S_SP]
549
550	/* Stash the regs for handle_bad_stack */
551	mov	x0, sp
552
553	/* Time to die */
554	bl	handle_bad_stack
555	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
557
558/*
559 * Invalid mode handlers
560 */
561	.macro	inv_entry, el, reason, regsize = 64
562	kernel_entry \el, \regsize
563	mov	x0, sp
564	mov	x1, #\reason
565	mrs	x2, esr_el1
566	bl	bad_mode
567	ASM_BUG()
568	.endm
569
/* Unexpected exceptions taken from EL0: report via bad_mode and BUG. */
570SYM_CODE_START_LOCAL(el0_sync_invalid)
571	inv_entry 0, BAD_SYNC
572SYM_CODE_END(el0_sync_invalid)

574SYM_CODE_START_LOCAL(el0_irq_invalid)
575	inv_entry 0, BAD_IRQ
576SYM_CODE_END(el0_irq_invalid)

578SYM_CODE_START_LOCAL(el0_fiq_invalid)
579	inv_entry 0, BAD_FIQ
580SYM_CODE_END(el0_fiq_invalid)

582SYM_CODE_START_LOCAL(el0_error_invalid)
583	inv_entry 0, BAD_ERROR
584SYM_CODE_END(el0_error_invalid)

#ifdef CONFIG_COMPAT
587SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
588	inv_entry 0, BAD_FIQ, 32
589SYM_CODE_END(el0_fiq_invalid_compat)
#endif
591
/* Unexpected exceptions taken from EL1: report via bad_mode and BUG. */
592SYM_CODE_START_LOCAL(el1_sync_invalid)
593	inv_entry 1, BAD_SYNC
594SYM_CODE_END(el1_sync_invalid)

596SYM_CODE_START_LOCAL(el1_irq_invalid)
597	inv_entry 1, BAD_IRQ
598SYM_CODE_END(el1_irq_invalid)

600SYM_CODE_START_LOCAL(el1_fiq_invalid)
601	inv_entry 1, BAD_FIQ
602SYM_CODE_END(el1_fiq_invalid)

604SYM_CODE_START_LOCAL(el1_error_invalid)
605	inv_entry 1, BAD_ERROR
606SYM_CODE_END(el1_error_invalid)
607
608/*
609 * EL1 mode handlers.
610 */
611	.align	6
612SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
613	kernel_entry 1
614	mov	x0, sp
615	bl	el1_sync_handler
616	kernel_exit 1
617SYM_CODE_END(el1_sync)
618
/*
 * IRQ taken from EL1: run the interrupt handler on the IRQ stack, with
 * pseudo-NMI accounting, irqflags tracing and kernel preemption handling.
 */
619	.align	6
620SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
621	kernel_entry 1
622	gic_prio_irq_setup pmr=x20, tmp=x1
623	enable_da_f
624
625#ifdef CONFIG_ARM64_PSEUDO_NMI
	/* x20 = PMR at entry (saved by kernel_entry); non-0 res => NMI */
626	test_irqs_unmasked	res=x0, pmr=x20
627	cbz	x0, 1f
628	bl	asm_nmi_enter
6291:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
633	bl	trace_hardirqs_off
#endif

636	irq_handler

#ifdef CONFIG_PREEMPTION
639	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
640alternative_if ARM64_HAS_IRQ_PRIO_MASKING
641	/*
642	 * DA_F were cleared at start of handling. If anything is set in DAIF,
643	 * we come back from an NMI, so skip preemption
644	 */
645	mrs	x0, daif
646	orr	x24, x24, x0
647alternative_else_nop_endif
648	cbnz	x24, 1f				// preempt count != 0 || NMI return path
649	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
6501:
#endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
654	/*
655	 * When using IRQ priority masking, we can get spurious interrupts while
656	 * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
657	 * section with interrupts disabled. Skip tracing in those cases.
658	 */
659	test_irqs_unmasked	res=x0, pmr=x20
660	cbz	x0, 1f
661	bl	asm_nmi_exit
6621:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_ARM64_PSEUDO_NMI
667	test_irqs_unmasked	res=x0, pmr=x20
668	cbnz	x0, 1f
#endif
670	bl	trace_hardirqs_on
6711:
#endif

674	kernel_exit 1
675SYM_CODE_END(el1_irq)
676
677/*
678 * EL0 mode handlers.
679 */
680	.align	6
681SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
682	kernel_entry 0
683	mov	x0, sp
684	bl	el0_sync_handler
685	b	ret_to_user
686SYM_CODE_END(el0_sync)
687
#ifdef CONFIG_COMPAT
/* 32-bit (AArch32) EL0 entry points; regsize 32 zeroes the upper halves */
689	.align	6
690SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
691	kernel_entry 0, 32
692	mov	x0, sp
693	bl	el0_sync_compat_handler
694	b	ret_to_user
695SYM_CODE_END(el0_sync_compat)

697	.align	6
698SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
699	kernel_entry 0, 32
700	b	el0_irq_naked
701SYM_CODE_END(el0_irq_compat)

703SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
704	kernel_entry 0, 32
705	b	el0_error_naked
706SYM_CODE_END(el0_error_compat)
#endif
708
/*
 * IRQ taken from EL0. el0_irq_naked is the shared tail also entered from
 * el0_irq_compat after its 32-bit kernel_entry.
 */
709	.align	6
710SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
711	kernel_entry 0
712el0_irq_naked:
713	gic_prio_irq_setup pmr=x20, tmp=x0
714	ct_user_exit_irqoff
715	enable_da_f

#ifdef CONFIG_TRACE_IRQFLAGS
718	bl	trace_hardirqs_off
#endif

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	/* x22 = aborted PC (from kernel_entry); bit 55 distinguishes TTBR1 */
722	tbz	x22, #55, 1f
723	bl	do_el0_irq_bp_hardening
7241:
#endif
726	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
729	bl	trace_hardirqs_on
#endif
731	b	ret_to_user
732SYM_CODE_END(el0_irq)
733
/* SError taken from EL1: hand the ESR and pt_regs to do_serror(). */
734SYM_CODE_START_LOCAL(el1_error)
735	kernel_entry 1
736	mrs	x1, esr_el1
737	gic_prio_kentry_setup tmp=x2
738	enable_dbg
739	mov	x0, sp
740	bl	do_serror
741	kernel_exit 1
742SYM_CODE_END(el1_error)

/* SError taken from EL0 (el0_error_naked is shared with the compat path). */
744SYM_CODE_START_LOCAL(el0_error)
745	kernel_entry 0
746el0_error_naked:
747	mrs	x25, esr_el1
748	gic_prio_kentry_setup tmp=x2
749	ct_user_exit_irqoff
750	enable_dbg
751	mov	x0, sp
752	mov	x1, x25
753	bl	do_serror
754	enable_da_f
755	b	ret_to_user
756SYM_CODE_END(el0_error)
757
758/*
759 * "slow" syscall return path.
760 */
761SYM_CODE_START_LOCAL(ret_to_user)
762	disable_daif
763	gic_prio_kentry_setup tmp=x3
764	ldr	x1, [tsk, #TSK_TI_FLAGS]
765	and	x2, x1, #_TIF_WORK_MASK
766	cbnz	x2, work_pending
767finish_ret_to_user:
768	enable_step_tsk x1, x2
769#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
770	bl	stackleak_erase
771#endif
772	kernel_exit 0
773
774/*
775 * Ok, we need to do extra processing, enter the slow path.
776 */
777work_pending:
778	mov	x0, sp				// 'regs'
779	bl	do_notify_resume
780#ifdef CONFIG_TRACE_IRQFLAGS
781	bl	trace_hardirqs_on		// enabled while in userspace
782#endif
783	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
784	b	finish_ret_to_user
785SYM_CODE_END(ret_to_user)
786
787	.popsection				// .entry.text
788
789#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
790/*
791 * Exception vectors trampoline.
792 */
793	.pushsection ".entry.tramp.text", "ax"
794
/*
 * KPTI: switch TTBR1 from the trampoline (user-visible) tables to the
 * full kernel tables and clear the user ASID flag. \tmp is clobbered.
 */
795	.macro tramp_map_kernel, tmp
796	mrs	\tmp, ttbr1_el1
797	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
798	bic	\tmp, \tmp, #USER_ASID_FLAG
799	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
801alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
802	/* ASID already in \tmp[63:48] */
803	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
804	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
805	/* 2MB boundary containing the vectors, so we nobble the walk cache */
806	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
807	isb
808	tlbi	vae1, \tmp
809	dsb	nsh
810alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
812	.endm
813
/*
 * KPTI: switch TTBR1 back to the trampoline tables and restore the user
 * ASID flag before returning to EL0. \tmp is clobbered.
 */
814	.macro tramp_unmap_kernel, tmp
815	mrs	\tmp, ttbr1_el1
816	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
817	orr	\tmp, \tmp, #USER_ASID_FLAG
818	msr	ttbr1_el1, \tmp
819	/*
820	 * We avoid running the post_ttbr_update_workaround here because
821	 * it's only needed by Cavium ThunderX, which requires KPTI to be
822	 * disabled.
823	 */
824	.endm
825
/*
 * One trampoline vector entry, executed from the trampoline mapping while
 * the kernel proper is unmapped: stash x30, map the kernel, then jump to
 * the real vectors. Only x30 may be used until the real vector runs.
 */
826	.macro tramp_ventry, regsize = 64
827	.align	7
8281:
829	.if	\regsize == 64
830	msr	tpidrro_el0, x30	// Restored in kernel_ventry
831	.endif
832	/*
833	 * Defend against branch aliasing attacks by pushing a dummy
834	 * entry onto the return stack and using a RET instruction to
835	 * enter the full-fat kernel vectors.
836	 */
837	bl	2f
838	b	.
8392:
840	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	/* KASLR: load the (randomized) vectors address from the page after us */
842	adr	x30, tramp_vectors + PAGE_SIZE
843alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
844	ldr	x30, [x30]
#else
846	ldr	x30, =vectors
#endif
848alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
849	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
850alternative_else_nop_endif
851	msr	vbar_el1, x30
	/* Jump to the matching entry in the real vector table */
852	add	x30, x30, #(1b - tramp_vectors)
853	isb
854	ret
855	.endm
856
/*
 * Return to EL0 through the trampoline: point VBAR back at the trampoline
 * vectors, unmap the kernel, recover x30 (stashed in far_el1 by
 * kernel_exit for 64-bit tasks) and ERET.
 */
857	.macro tramp_exit, regsize = 64
858	adr	x30, tramp_vectors
859	msr	vbar_el1, x30
860	tramp_unmap_kernel	x30
861	.if	\regsize == 64
862	mrs	x30, far_el1
863	.endif
864	eret
865	sb
866	.endm
867
/*
 * Trampoline vector table: the first 0x400 bytes (EL1 vectors) are unused
 * padding; only the eight EL0 entries (64-bit then 32-bit) are populated.
 */
868	.align	11
869SYM_CODE_START_NOALIGN(tramp_vectors)
870	.space	0x400

872	tramp_ventry
873	tramp_ventry
874	tramp_ventry
875	tramp_ventry

877	tramp_ventry	32
878	tramp_ventry	32
879	tramp_ventry	32
880	tramp_ventry	32
881SYM_CODE_END(tramp_vectors)
882
/* Trampoline exit stubs, branched to from kernel_exit via tramp_alias. */
883SYM_CODE_START(tramp_exit_native)
884	tramp_exit
885SYM_CODE_END(tramp_exit_native)

887SYM_CODE_START(tramp_exit_compat)
888	tramp_exit	32
889SYM_CODE_END(tramp_exit_compat)
890
891	.ltorg
892	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
/* Page-aligned literal holding the KASLR'd vectors address; mapped after
 * the trampoline page so tramp_ventry can load it. */
894	.pushsection ".rodata", "a"
895	.align PAGE_SHIFT
896SYM_DATA_START(__entry_tramp_data_start)
897	.quad	vectors
898SYM_DATA_END(__entry_tramp_data_start)
899	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
901#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
902
903/*
904 * Register switch for AArch64. The callee-saved registers need to be saved
905 * and restored. On entry:
906 *   x0 = previous task_struct (must be preserved across the switch)
907 *   x1 = next task_struct
908 * Previous and next are guaranteed not to be the same.
909 *
910 */
911SYM_FUNC_START(cpu_switch_to)
912	mov	x10, #THREAD_CPU_CONTEXT
913	add	x8, x0, x10
914	mov	x9, sp
915	stp	x19, x20, [x8], #16		// store callee-saved registers
916	stp	x21, x22, [x8], #16
917	stp	x23, x24, [x8], #16
918	stp	x25, x26, [x8], #16
919	stp	x27, x28, [x8], #16
920	stp	x29, x9, [x8], #16
921	str	lr, [x8]
922	add	x8, x1, x10
923	ldp	x19, x20, [x8], #16		// restore callee-saved registers
924	ldp	x21, x22, [x8], #16
925	ldp	x23, x24, [x8], #16
926	ldp	x25, x26, [x8], #16
927	ldp	x27, x28, [x8], #16
928	ldp	x29, x9, [x8], #16
929	ldr	lr, [x8]
930	mov	sp, x9
931	msr	sp_el0, x1
932	ptrauth_keys_install_kernel x1, x8, x9, x10
933	scs_save x0, x8
934	scs_load x1, x8
935	ret
936SYM_FUNC_END(cpu_switch_to)
937NOKPROBE(cpu_switch_to)
938
939/*
940 * This is how we return from a fork.
941 */
942SYM_CODE_START(ret_from_fork)
943	bl	schedule_tail
944	cbz	x19, 1f				// not a kernel thread
945	mov	x0, x20
946	blr	x19
9471:	get_current_task tsk
948	b	ret_to_user
949SYM_CODE_END(ret_from_fork)
950NOKPROBE(ret_from_fork)
951
952#ifdef CONFIG_ARM_SDE_INTERFACE
953
954#include <asm/sdei.h>
955#include <uapi/linux/arm_sdei.h>
956
/*
 * Complete an SDEI event via firmware using the registered conduit
 * (\exit_mode selects SMC vs HVC). On success, this call never returns.
 */
.macro sdei_handler_exit exit_mode
958	cmp	\exit_mode, #SDEI_EXIT_SMC
959	b.ne	99f
960	smc	#0
961	b	.
96299:	hvc	#0
963	b	.
.endm
966
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
978SYM_CODE_START(__sdei_asm_entry_trampoline)
979	mrs	x4, ttbr1_el1
980	tbz	x4, #USER_ASID_BIT, 1f		// kernel already mapped?

982	tramp_map_kernel tmp=x4
983	isb
984	mov	x4, xzr

986	/*
987	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
988	 * the kernel on exit.
989	 */
9901:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

#ifdef CONFIG_RANDOMIZE_BASE
	/* KASLR: handler address is in a literal mapped after the tramp page */
993	adr	x4, tramp_vectors + PAGE_SIZE
994	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
995	ldr	x4, [x4]
#else
997	ldr	x4, =__sdei_asm_handler
#endif
999	br	x4
1000SYM_CODE_END(__sdei_asm_entry_trampoline)
1001NOKPROBE(__sdei_asm_entry_trampoline)
1002
1003/*
1004 * Make the exit call and restore the original ttbr1_el1
1005 *
1006 * x0 & x1: setup for the exit API call
1007 * x2: exit_mode
1008 * x4: struct sdei_registered_event argument from registration time.
1009 */
1010SYM_CODE_START(__sdei_asm_exit_trampoline)
1011	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1012	cbnz	x4, 1f
1013
1014	tramp_unmap_kernel	tmp=x4
1015
10161:	sdei_handler_exit exit_mode=x2
1017SYM_CODE_END(__sdei_asm_exit_trampoline)
1018NOKPROBE(__sdei_asm_exit_trampoline)
1019	.ltorg
1020.popsection		// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
/* Literal holding the KASLR'd __sdei_asm_handler address for the trampoline */
.pushsection ".rodata", "a"
1023SYM_DATA_START(__sdei_asm_trampoline_next_handler)
1024	.quad	__sdei_asm_handler
1025SYM_DATA_END(__sdei_asm_trampoline_next_handler)
.popsection		// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
1028#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1029
1030/*
1031 * Software Delegated Exception entry point.
1032 *
1033 * x0: Event number
1034 * x1: struct sdei_registered_event argument from registration time.
1035 * x2: interrupted PC
1036 * x3: interrupted PSTATE
1037 * x4: maybe clobbered by the trampoline
1038 *
1039 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
1040 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
1041 * want them.
1042 */
1043SYM_CODE_START(__sdei_asm_handler)
1044	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1045	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1046	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1047	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1048	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1049	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1050	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1051	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1052	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1053	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1054	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1055	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1056	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1057	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1058	mov	x4, sp
1059	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1060
1061	mov	x19, x1
1062
1063#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
1064	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1065#endif
1066
1067#ifdef CONFIG_VMAP_STACK
1068	/*
1069	 * entry.S may have been using sp as a scratch register, find whether
1070	 * this is a normal or critical event and switch to the appropriate
1071	 * stack for this CPU.
1072	 */
1073	cbnz	w4, 1f
1074	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1075	b	2f
10761:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
10772:	mov	x6, #SDEI_STACK_SIZE
1078	add	x5, x5, x6
1079	mov	sp, x5
1080#endif
1081
1082#ifdef CONFIG_SHADOW_CALL_STACK
1083	/* Use a separate shadow call stack for normal and critical events */
1084	cbnz	w4, 3f
1085	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
1086	b	4f
10873:	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
10884:
1089#endif
1090
1091	/*
1092	 * We may have interrupted userspace, or a guest, or exit-from or
1093	 * return-to either of these. We can't trust sp_el0, restore it.
1094	 */
1095	mrs	x28, sp_el0
1096	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1097	msr	sp_el0, x0
1098
1099	/* If we interrupted the kernel point to the previous stack/frame. */
1100	and     x0, x3, #0xc
1101	mrs     x1, CurrentEL
1102	cmp     x0, x1
1103	csel	x29, x29, xzr, eq	// fp, or zero
1104	csel	x4, x2, xzr, eq		// elr, or zero
1105
1106	stp	x29, x4, [sp, #-16]!
1107	mov	x29, sp
1108
1109	add	x0, x19, #SDEI_EVENT_INTREGS
1110	mov	x1, x19
1111	bl	__sdei_handler
1112
1113	msr	sp_el0, x28
1114	/* restore regs >x17 that we clobbered */
1115	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1116	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1117	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1118	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1119	mov	sp, x1
1120
1121	mov	x1, x0			// address to complete_and_resume
1122	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1123	cmp	x0, #1
1124	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1125	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1126	csel	x0, x2, x3, ls
1127
1128	ldr_l	x2, sdei_exit_mode
1129
1130alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1131	sdei_handler_exit exit_mode=x2
1132alternative_else_nop_endif
1133
1134#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1135	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1136	br	x5
1137#endif
1138SYM_CODE_END(__sdei_asm_handler)
1139NOKPROBE(__sdei_asm_handler)
1140#endif /* CONFIG_ARM_SDE_INTERFACE */
1141