xref: /linux/arch/arm/kernel/entry-common.S (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/memory.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro  arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"

saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
saved_pc	.req	r9
#define TRACE(x...) x
#else
saved_pc	.req	lr
#define TRACE(x...)
#endif
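/*
 * saved_psr/saved_pc are register aliases used by the SWI entry below.
 * The TRACE() wrapper marks the extra work needed when tracing or
 * context tracking is built in: the C calls made on kernel entry can
 * clobber lr and r0-r3, so the calling PC is parked in a callee-saved
 * register (r9) and the arguments are reloaded later at the TRACE()
 * sites below.
 */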

	.section .entry.text,"ax",%progbits
	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r2, [tsk, #TI_ADDR_LIMIT]
	cmp	r2, #TASK_SIZE
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	bne	fast_work_pending


	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled.  As we will need to call out to some C functions,
 * we save r0 first to avoid needing to save registers around each C function
 * call.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r2, [tsk, #TI_ADDR_LIMIT]
	cmp	r2, #TASK_SIZE
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)
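/*
 * do_work_pending() (arch/arm/kernel/signal.c) returns 0 when all work is
 * done and we may return to user space, a positive value when the same
 * syscall should be restarted via local_restart, and a negative value
 * when the restart has to go through __NR_restart_syscall instead; r0-r6
 * are reloaded so the restarted syscall sees its original arguments.
 */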

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace			@ enable interrupts
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r2, [tsk, #TI_ADDR_LIMIT]
	cmp	r2, #TASK_SIZE
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
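/*
 * ret_to_user_from_irq is entered from paths (such as the IRQ exit code)
 * where interrupts are already disabled and no syscall is being exited,
 * so it can skip the rseq debug hook and the interrupt disabling above.
 */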

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
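/*
 * r4/r5 are set up by copy_thread(): for a kernel thread, r5 holds the
 * thread function and r4 its argument, so the function is called here
 * and falls back into the slow return path when it returns; for a user
 * child, r5 is zero and we go straight to ret_slow_syscall with the
 * r0 = 0 that copy_thread() placed in the child's pt_regs.
 */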

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif
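/*
 * For illustration (using __NR_write == 4 as the example), the two user
 * space invocation forms handled here look roughly like:
 *
 *	EABI:	mov	r7, #4			@ number in r7, args in r0-r3
 *		swi	#0
 *
 *	OABI:	swi	#(0x900000 + 4)		@ number in the instruction
 *
 * Only the OABI form encodes the number in the SWI instruction itself,
 * which is why the instruction has to be fetched back from user space
 * above when OABI compatibility is configured.
 */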

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
	get_thread_info tsk
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking.)
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	invoke_syscall tbl, scno, r10, __ret_fast_syscall

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT. Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in. This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
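	/*
	 * syscall_trace_enter() hands back the (possibly rewritten) syscall
	 * number in r0; -1 appears to mean the call was skipped or denied
	 * (e.g. by the tracer or seccomp), in which case we go straight to
	 * the slow return path with whatever return value was left in the
	 * saved r0.
	 */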
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm
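/*
 * Rough usage sketch of the macros above (entry numbers purely
 * illustrative): the included calls-*.S files expand to a list of
 * "syscall <nr>, <func>" lines, e.g.
 *
 *	syscall_table_start sys_call_table
 *	syscall 0, sys_restart_syscall
 *	syscall 3, sys_read	@ any gap (1-2 here) is padded with
 *				@ sys_ni_syscall by the .rept above
 *	syscall_table_end sys_call_table
 *
 * so holes and the table tail are filled with sys_ni_syscall, and an
 * out-of-order or oversized table fails at assembly time.
 */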

#define NATIVE(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
#undef COMPAT
	syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
#ifdef CONFIG_CPU_SPECTRE
		movhs	scno, #0
		csdb
#endif
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)
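/*
 * sys_syscall is the old "indirect" syscall(2) entry: the real syscall
 * number arrives in r0 and the remaining arguments are shifted down one
 * slot before indexing the table.  Under CONFIG_CPU_SPECTRE the
 * movhs/csdb pair above clamps an out-of-range index to 0 before the
 * speculative table load (Spectre-v1 style index sanitisation).
 */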

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
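/*
 * The two wrappers above appear to paper over a structure size mismatch:
 * user space passes sizeof(struct statfs64) as 88 (padded for 64-bit
 * alignment) while the kernel's packed definition is 84 bytes, so the
 * size argument is rewritten before sys_[f]statfs64() performs its size
 * check.
 */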

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
ENDPROC(sys_mmap2)
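/*
 * off_4k arrives in r5, the sixth syscall argument.  sys_mmap_pgoff()
 * takes the page offset as its sixth C argument, which lives in the
 * stack slot pushed at local_restart, so the value is stored there and
 * the call is simply forwarded (with 4K pages no unit conversion is
 * needed).
 */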

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
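/*
 * Roughly: EABI requires 64-bit arguments to start in an even/odd
 * register pair (or an 8-byte aligned stack slot), while the old ABI
 * packed them into whatever came next.  Taking pread64 as an example,
 * an OABI caller passes the 64-bit position in r3+r4, so the wrapper
 * below stores that pair into the stack argument slots where the EABI
 * sys_pread64() expects to find it.
 */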

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#define COMPAT(nr, native, compat) syscall nr, compat
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif