xref: /linux/arch/arm64/kernel/entry.S (revision 80d443e8876602be2c130f79c4de81e12e2a700d)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
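
/*
 * Roughly, in C-like terms, these two macros bracket kernel execution as:
 *
 *	context_tracking_user_exit();	// on entry from EL0
 *	... handle the exception ...
 *	context_tracking_user_enter();	// just before eret back to EL0
 *
 * The x0-x7 reload in ct_user_exit is only needed on the syscall path:
 * the C call may clobber the argument registers that el0_svc still
 * expects to contain the original syscall arguments.
 */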

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
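
/*
 * The frame built by kernel_entry matches struct pt_regs; as a rough
 * C-level sketch (the S_* offsets used above come from asm-offsets, the
 * fields from arch/arm64/include/asm/ptrace.h):
 *
 *	struct pt_regs {
 *		u64 regs[31];		// x0..x30 (S_X0.., S_LR = &regs[30])
 *		u64 sp;			// aborted SP (S_SP)
 *		u64 pc;			// aborted PC, from ELR_EL1 (S_PC)
 *		u64 pstate;		// aborted PSTATE, from SPSR_EL1
 *		u64 orig_x0;		// x0 at syscall entry (S_ORIG_X0)
 *		u64 syscallno;		// -1 unless this is a syscall
 *		u64 orig_addr_limit;	// saved addr_limit (EL1 entries only)
 *		u64 unused;		// padding, keeps 16-byte alignment
 *	};
 */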

	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr0_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel
	.endm
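
/*
 * A rough C-like sketch of the CONFIG_ARM64_SW_TTBR0_PAN block above
 * (skipped entirely when the CPU has real ARMv8.1 PAN):
 *
 *	if (el != 0 && (spsr & PSR_PAN_BIT))
 *		;				// leave TTBR0 access disabled
 *	else
 *		__uaccess_ttbr0_enable();	// restore the user TTBR0_EL1
 *	if (el != 0)
 *		spsr &= ~PSR_PAN_BIT;		// ARMv8.0 SPSR has no PAN bit
 *	// returns to EL0 additionally run post_ttbr0_update_workaround
 */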

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	adr_this_cpu x25, irq_stack, x26
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

	/*
	 * Add a dummy stack frame; this non-standard format is fixed up
	 * by unwind_frame().
	 */
	stp     x29, x19, [sp, #-16]!
	mov	x29, sp

9998:
	.endm
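
/*
 * The stack check above is, approximately:
 *
 *	if (((sp ^ (u64)tsk->stack) & ~(THREAD_SIZE - 1)) == 0) {
 *		// on the task stack: switch to this CPU's IRQ stack,
 *		// leaving a dummy frame so the unwinder can step back
 *		// to the original stack via x19
 *		sp = this_cpu_ptr(irq_stack) + IRQ_STACK_START_SP;
 *	}
 *	// otherwise sp is already on the IRQ stack and is left alone
 */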

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
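
/*
 * irq_handler is, in effect, an indirect call on the per-CPU IRQ stack
 * to the C interrupt entry point registered via set_handle_irq():
 *
 *	handle_arch_irq(regs);		// e.g. gic_handle_irq
 */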

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
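
/*
 * Layout note: the architecture requires VBAR_EL1 to be 2KB aligned
 * (hence .align 11) and fetches each vector from a fixed 0x80-byte slot:
 * four slots per group, with the groups ordered "current EL with SP0",
 * "current EL with SPx", "lower EL using AArch64", "lower EL using
 * AArch32" - exactly the order of the ventry lines above.
 */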

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
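
/*
 * The dispatch above keys off the exception class field of ESR_EL1;
 * approximately:
 *
 *	switch (esr_el1 >> ESR_ELx_EC_SHIFT) {	// EC is ESR[31:26]
 *	case ESR_ELx_EC_DABT_CUR: el1_da();	// data abort from EL1
 *	case ESR_ELx_EC_IABT_CUR: el1_ia();
 *	...
 *	default:                  el1_inv();
 *	}
 *
 * The EC encodings at or above ESR_ELx_EC_BREAKPT_CUR are all
 * debug/breakpoint related, which is why a single b.ge routes them
 * to el1_dbg.
 */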

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
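
/*
 * The PREEMPT check at the end of el1_irq is, roughly:
 *
 *	if (preempt_count() == 0 &&
 *	    (current_thread_info()->flags & _TIF_NEED_RESCHED))
 *		el1_preempt();	// loops on preempt_schedule_irq() until
 *				// TIF_NEED_RESCHED is clear again
 */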

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov     sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
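
/*
 * The save/restore area used above is thread.cpu_context, located at
 * offset THREAD_CPU_CONTEXT in task_struct; as a rough sketch of its
 * layout (see struct cpu_context in asm/processor.h):
 *
 *	struct cpu_context {
 *		unsigned long x19, x20, x21, x22, x23,
 *			      x24, x25, x26, x27, x28;
 *		unsigned long fp;	// x29
 *		unsigned long sp;
 *		unsigned long pc;	// reloaded into lr, so ret resumes there
 *	};
 *
 * sp_el0 is switched as well because it caches the 'current' task
 * pointer while running in the kernel.
 */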

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
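
/*
 * A sketch of the two cases handled above (the x19/x20 convention here
 * is the one set up by copy_thread() for new tasks):
 *
 *	schedule_tail(prev);
 *	if (x19)		// kernel thread: x19 = fn, x20 = arg
 *		x19(x20);
 *	// user task, or a kernel thread whose fn returned
 *	ret_to_user();
 */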

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp     scno, sc_nr                     // check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
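
/*
 * Ignoring the tracing hook, the dispatch above is approximately:
 *
 *	regs->orig_x0 = regs->regs[0];
 *	regs->syscallno = scno;
 *	if (!(current_thread_info()->flags & _TIF_SYSCALL_WORK)) {
 *		if (scno < sc_nr)
 *			x0 = sys_call_table[scno]();	// args still in x0..x6
 *		else
 *			x0 = do_ni_syscall(regs);
 *		ret_fast_syscall();	// stores x0 back into regs->regs[0]
 *	}
 *	// otherwise fall into the ptrace/seccomp slow path (__sys_trace)
 */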

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp     scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return
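
/*
 * Roughly, the traced path differs from the fast path in two ways:
 * syscall_trace_enter() runs first (ptrace/seccomp/tracepoints) and may
 * rewrite or skip the syscall (a return value of -1 skips it, with x0
 * on the stack already holding the result, -ENOSYS for a user-issued
 * syscall(-1)), and syscall_trace_exit() runs on the way out before
 * ret_to_user.
 */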

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
867