xref: /linux/arch/arm64/kernel/entry.S (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
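/*
 * context_tracking_user_exit() is a C function and may clobber the
 * AAPCS64 caller-saved registers, so syscall arguments (x0-x7) must be
 * reloaded from the pt_regs frame afterwards.
 */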
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 * -----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
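
/*
 * The frame laid out by kernel_entry matches struct pt_regs; the S_*
 * offsets used here (S_X2, S_LR, S_PC, ...) are generated from it by
 * asm-offsets. kernel_exit below unwinds the same frame in reverse.
 */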

	.macro	kernel_exit, el
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
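	/*
	 * Cortex-A53 erratum 845719 workaround: a dummy write to
	 * CONTEXTIDR_EL1 is required when returning to a 32-bit task
	 * (SPSR_EL1 M[4] set). The alternatives framework patches the
	 * NOPs below into the workaround on affected CPUs.
	 */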
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if_not ARM64_WORKAROUND_845719
	nop
	nop
#ifdef CONFIG_PID_IN_CONTEXTIDR
	nop
#endif
alternative_else
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_endif
#endif
	.endif
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to the interrupted context
	.endm

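/*
 * thread_info lives at the lowest address of the THREAD_SIZE-aligned
 * kernel stack, so masking off the low bits of the current SP yields
 * its address.
 */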
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm

/*
 * These are the registers used in the syscall handler, and they allow,
 * in theory, up to seven arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
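/*
 * adrp/:lo12: form the PC-relative address of the handle_arch_irq
 * function pointer; the handler is called with pt_regs (the current
 * SP) as its only argument.
 */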
	.macro	irq_handler
	adrp	x1, handle_arch_irq
	ldr	x1, [x1, #:lo12:handle_arch_irq]
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */

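/*
 * VBAR_EL1 requires 2KB (2^11) alignment. The table below has sixteen
 * 128-byte slots: four exception types (Synchronous, IRQ, FIQ, SError)
 * for each of EL1t, EL1h, 64-bit EL0 and 32-bit EL0.
 */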
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
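/*
 * Synchronous exceptions are demultiplexed on the exception class (EC)
 * field of ESR_EL1, bits [31:26] - hence the ESR_ELx_EC_SHIFT below.
 */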
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
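/*
 * By convention in the handlers below, x25 holds the ESR_EL1 value and
 * x26 the FAR_EL1 value; both are callee-saved, so they survive the C
 * call made by ct_user_exit.
 */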
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)		// clear the address tag (TBI)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
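/*
 * THREAD_CPU_CONTEXT is the offset of thread.cpu_context within
 * task_struct; the saved context holds x19-x28, fp, sp and pc (stored
 * here as this function's lr).
 */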
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back onto the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
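/*
 * For a kernel thread, copy_thread() leaves the thread function in x19
 * and its argument in x20 (callee-saved, so they survive
 * schedule_tail()); user tasks have x19 == 0 and fall through to
 * ret_to_user.
 */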
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
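/*
 * Syscall numbers arrive in w8 (w7/r7 for compat tasks) and index an
 * array of 64-bit function pointers - hence the "lsl #3" scaling when
 * loading the sys_* routine from the table.
 */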
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
733