xref: /linux/arch/arm64/kernel/entry.S (revision 93d90ad708b8da6efc0e487b66111aa9db7f70c7)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
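
/*
 * context_tracking_user_exit/enter are ordinary C calls, so they may
 * clobber the caller-saved registers x0-x7 (AAPCS64).  That is why the
 * syscall variant of ct_user_exit reloads the syscall arguments from the
 * pt_regs frame that kernel_entry has already populated.
 */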

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
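
/*
 * These reason codes are passed to bad_mode() (arch/arm64/kernel/traps.c),
 * which uses them to name the unhandled exception in its diagnostics.
 */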

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
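
/*
 * The frame built by kernel_entry is a struct pt_regs: the general purpose
 * registers x0-x30 stored in pairs, followed by the aborted SP, ELR (PC),
 * SPSR (PSTATE), the original x0 and the syscall number.  The S_* offsets
 * used above are generated from the C structure by asm-offsets.c.
 */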

	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	.endif
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	.else
	ldp	x0, x1, [sp, #16 * 0]
	.endif
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to the aborted context
	.endm
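
/*
 * With ret = 1 (the fast syscall return path) x0 still holds the live
 * syscall return value and is deliberately not reloaded from the frame;
 * only x1 is restored.  The final eret takes PC from ELR_EL1 and PSTATE
 * from SPSR_EL1, which were set up just above.
 */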

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm
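
/*
 * This works because thread_info sits at the lowest address of the
 * THREAD_SIZE-aligned kernel stack, so clearing the low bits of the
 * current SP yields its address.
 */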

/*
 * These are the registers used in the syscall handler.  They live in
 * callee-saved registers (x25-x28) so that they survive calls into C code,
 * and they stay clear of x0-x6, which in theory lets a function take up to
 * 7 arguments.
 *
 * x7 is reserved for the system call number in 32-bit (compat) mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	adrp	x1, handle_arch_irq
	ldr	x1, [x1, #:lo12:handle_arch_irq]
	mov	x0, sp
	blr	x1
	.endm
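
/*
 * handle_arch_irq is a C function pointer installed by the interrupt
 * controller driver via set_handle_irq().  The adrp/:lo12: pair forms the
 * address of the variable, the ldr fetches the pointer, and the handler is
 * invoked with the saved pt_regs in x0.
 */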

	.text

/*
 * Exception vectors.
 */

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
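
/*
 * VBAR_EL1 requires the vector base to be 2KB aligned, hence the .align 11.
 * The table consists of four groups of four 128-byte slots: current EL with
 * SP_EL0 (EL1t), current EL with SP_EL1 (EL1h), lower EL using AArch64 and
 * lower EL using AArch32, each ordered Synchronous/IRQ/FIQ/Error.
 */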

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
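
/*
 * preempt_schedule_irq() expects to be entered with interrupts disabled and
 * handles enabling/disabling them itself; the loop keeps rescheduling for
 * as long as TIF_NEED_RESCHED is found set again on return.
 */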

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif
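
/*
 * For AArch32 tasks the syscall number arrives in r7 (visible here as w7)
 * and is looked up in compat_sys_call_table, whereas the native AArch64
 * path below uses w8 and sys_call_table.  Both paths share el0_svc_naked
 * for the actual dispatch.
 */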

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
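
/*
 * THREAD_CPU_CONTEXT is the offset of thread_struct.cpu_context within
 * task_struct.  Only the callee-saved registers, fp, sp and lr need to be
 * switched; the final ret resumes on the next task's stack at whichever
 * call site last entered cpu_switch_to for that task, with x0 (the previous
 * task) preserved for the caller.
 */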

/*
 * This is the fast syscall return path.  We do as little as possible here:
 * in particular, x0 (the syscall return value) is not written back into the
 * pt_regs frame unless extra work turns out to be pending.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "slow" syscall return path.  This is also the common return-to-user path
 * for interrupts and other exceptions taken from EL0.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
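
/*
 * copy_thread() places the thread function in x19 and its argument in x20
 * when creating a kernel thread; for a user task x19 is zero, so the cbz
 * above sends it straight to ret_to_user after schedule_tail().
 */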

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
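
/*
 * This implements the AArch64 Linux syscall convention: syscall number in
 * w8, up to six arguments in x0-x5, result returned in x0.  Purely for
 * illustration (not part of this file), a userspace write(2) looks roughly
 * like:
 *
 *	mov	x0, #1			// fd = stdout
 *	adr	x1, msg			// buf (hypothetical label)
 *	mov	x2, #13			// count
 *	mov	w8, #64			// __NR_write on arm64
 *	svc	#0
 *	// on return, x0 holds the byte count or a negative errno
 *
 * The adrp-only address formation above relies on sys_call_table being
 * page-aligned, so no :lo12: add is needed.
 */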

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent (the tracer) to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return
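
/*
 * syscall_trace_enter() gives ptrace, seccomp and the syscall tracepoints a
 * chance to inspect or rewrite the call; it returns the (possibly changed)
 * syscall number, or -1 when the syscall should be skipped, in which case
 * only the exit-side tracing runs.
 */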

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
