/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
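
/*
 * These reason codes are handed to bad_mode() in traps.c, which (going
 * by the C side at this revision) uses them to pick the human-readable
 * vector name printed in its diagnostics.
 */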

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
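	/*
	 * push/pop below are helper macros from <asm/assembler.h>, not
	 * architectural mnemonics; each push should expand to roughly
	 *
	 *	stp	xreg1, xreg2, [sp, #-16]!
	 *
	 * so the fifteen pairs below fill pt_regs.regs[0..29] from the
	 * highest offsets down, leaving sp at the base of the frame.
	 */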
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
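
/*
 * After kernel_entry, sp points at the base of a struct pt_regs frame.
 * Assuming the <asm/ptrace.h> layout of this era, the S_* offsets
 * generated by asm-offsets.c map onto:
 *
 *	regs[31]	x0 - x30 (lr)
 *	sp		aborted SP
 *	pc		aborted PC (from ELR_EL1)
 *	pstate		aborted PSTATE (from SPSR_EL1)
 *	orig_x0		original x0 at syscall entry
 *	syscallno	syscall number, -1 for non-syscall entries
 */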

	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel or user
	.endm
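/*
 * Note on the \ret path above: x0 is deliberately not reloaded (it
 * already holds the syscall return value); once x1 is restored, sp is
 * simply stepped past the x0/x1 slots with add sp, sp, S_X2.
 */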

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~((1 << 13) - 1)	// top of 8K stack
	.endm
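/*
 * This relies on thread_info living at the lowest address of the 8K
 * (THREAD_SIZE) kernel stack: rounding the current SP down to an 8K
 * boundary is the assembly counterpart of current_thread_info().
 */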

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm
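	/*
	 * handle_arch_irq (a code pointer, defined as a zeroed .quad at
	 * the end of this file) is filled in at boot by the interrupt
	 * controller driver, e.g. the GIC, before IRQs are unmasked.
	 */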

	.text

/*
 * Exception vectors.
 */

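/*
 * VBAR_EL1 requires 2KB alignment, hence the .align 11. Each ventry
 * (essentially .align 7 followed by a branch, per <asm/assembler.h> at
 * this revision) starts a 128-byte slot, giving the architectural
 * 16-entry layout: four vector types (Synchronous, IRQ, FIQ, Error) for
 * each of EL1t, EL1h, 64-bit EL0 and 32-bit EL0.
 */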
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
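/*
 * Synchronous exceptions are demultiplexed on the exception class held
 * in ESR_EL1[31:26] (the lsr #26 below): 0x25 is a data abort taken
 * without a change in exception level, 0x18 a trapped MSR/MRS/system
 * instruction, 0x26/0x22 SP/PC alignment faults, and classes 0x30 and
 * up are debug exceptions.
 */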
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #26			// exception class
	cmp	x24, #0x25			// data abort in EL1
	b.eq	el1_da
	cmp	x24, #0x18			// configurable trap
	b.eq	el1_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x00			// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #0x30			// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg_if_not_stepping x2
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x0, x24, #1			// increment it
	str	x0, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
	cbnz	x24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	enable_dbg
	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
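/*
 * The dispatch mirrors el1_sync, but with the lower-exception-level
 * class codes: 0x24 instead of 0x25 for data aborts, 0x20 for
 * instruction aborts, and 0x15 for an SVC executed in 64-bit state.
 */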
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x15			// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x2c			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x18			// configurable trap
	b.eq	el0_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x11			// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x28			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov     sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	disable_step x1
	mov	x1, x25
	mov	x2, sp
	b	do_debug_exception
el0_inv:
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	disable_step x1
	isb
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x23, x24, #1			// increment it
	str	x23, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	x0, [tsk, #TI_PREEMPT]
	str	x24, [tsk, #TI_PREEMPT]
	cmp	x0, x23
	b.eq	1f
	mov	x1, #0
	str	x1, [x1]			// BUG
1:
#endif
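	/*
	 * The check above traps unbalanced preempt counts: if the
	 * handler returned with a count different from the one written
	 * before irq_handler, the store through the NULL pointer in x1
	 * forces an immediate fault rather than silent corruption.
	 */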
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
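/*
 * The stp/ldp sequence below walks struct cpu_context embedded in
 * thread_struct (x19-x28, fp, sp, pc, going by <asm/processor.h>), so
 * THREAD_CPU_CONTEXT from asm-offsets points at its first field.
 */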
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	tbz	x1, #TIF_SINGLESTEP, fast_exit
	disable_dbg
	enable_step x2
fast_exit:
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	enable_dbg
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	tbz	x1, #TIF_SINGLESTEP, no_work_pending
	disable_dbg
	enable_step x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
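/*
 * copy_thread() seeds x19/x20 in the child's cpu_context: for a kernel
 * thread, x19 holds the thread function and x20 its argument, while for
 * a user fork x19 is zero and we fall straight through to ret_to_user.
 */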
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
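/*
 * AArch64 syscall convention: the number arrives in w8 and is
 * zero-extended into scno below; sys_call_table holds one 64-bit
 * pointer per syscall, hence the scaled index (scno, lsl #3).
 */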
	.align	6
el0_svc:
	adr	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	disable_step x16
	isb
	enable_dbg
	enable_irq

	get_thread_info tsk
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp     scno, sc_nr                     // check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
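	/*
	 * syscall_trace() is called once on entry (w0 == 0) and once on
	 * exit (w0 == 1); on entry it may rewrite the syscall number,
	 * which is why scno is reloaded from its return value below.
	 */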
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(sys_sigaltstack_wrapper)
	ldr	x2, [sp, #S_SP]
	b	sys_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

ENTRY(handle_arch_irq)
	.quad	0