xref: /linux/arch/arm64/kernel/entry.S (revision 5ad75fcdd712d18b393c3b3fe52ab4108703d337)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

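/*
 * Save the exception context (x0-x29, lr, sp, ELR_EL1, SPSR_EL1) as a
 * pt_regs frame on the kernel stack. 'el' is the exception level we were
 * taken from (0 or 1) and 'regsize' the register width of that context.
 */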
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	mov	tsk, sp
	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TI_ADDR_LIMIT]
	ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

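/*
 * Restore the register state saved by kernel_entry and return from the
 * exception with eret. 'el' selects whether we are returning to EL1 or
 * to user space at EL0.
 */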
	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
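	/*
	 * Cortex-A53 erratum 845719 workaround: returning to a 32-bit EL0
	 * task (SPSR_EL1.M[4] set in x22) requires a dummy write to
	 * CONTEXTIDR_EL1.
	 */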
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if_not ARM64_WORKAROUND_845719
	nop
	nop
#ifdef CONFIG_PID_IN_CONTEXTIDR
	nop
#endif
alternative_else
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_endif
#endif
	.endif
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return from exception
	.endm

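/*
 * While running in the kernel, sp_el0 holds the current thread_info
 * pointer (see kernel_entry and cpu_switch_to).
 */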
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the current thread_info. If the top
	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack and
	 * should switch to the irq stack.
	 */
	and	x25, x19, #~(THREAD_SIZE - 1)
	cmp	x25, tsk
	b.ne	9998f

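	// x25 = base of this CPU's IRQ stack, x26 = its initial stack pointer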
	this_cpu_ptr irq_stack, x25, x26
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

	/*
	 * Add a dummy stack frame; this non-standard format is fixed up
	 * by unwind_frame().
	 */
	stp	x29, x19, [sp, #-16]!
	mov	x29, sp

9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
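	// Call the handler registered in handle_arch_irq with x0 = pt_regs,
	// running it on the per-CPU IRQ stack when entered from a task stack.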
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

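/*
 * The vector table must be 2KB aligned, as required by VBAR_EL1; each
 * of the 16 entries is 128 bytes long.
 */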
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
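	// Debug ECs taken from the current EL are odd-numbered, except BRK64;
	// bump BRK64 first so that any even (lower-EL) EC falls through to
	// el1_inv.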
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
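	// Clear the address tag (top byte) from the fault address; EL0 may
	// be using tagged pointers.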
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
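	// Point sp_el0 at the new task's thread_info, which sits at the base
	// of its stack.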
	and	x9, x9, #~(THREAD_SIZE - 1)
	msr	sp_el0, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing; enter the slow path.
 */
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	mov	x0, sp				// 'regs'
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off		// the IRQs are off here, inform the tracing code
#endif
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
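	// x19/x20 are set up by copy_thread(): x19 is the function for a
	// kernel thread to call (NULL for a user task) and x20 its argument.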
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
819