/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here; in particular, we avoid writing r0 back to the
 * stack, since it already holds the syscall return value.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	@ fast_restore_user_regs
	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
 UNWIND(.fnend		)
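
/*
 * Note on the restore sequence above: "ldr lr, [sp, #...]!" both
 * fetches the saved PC and advances sp to the S_PC slot, so the
 * following "ldmdb sp, {r1 - lr}^" can restore the *user* mode
 * registers (the ^ qualifier selects the user bank) from below that
 * point without touching r0, which still holds the syscall return
 * value.  The "mov r0, r0" is a deliberate no-op: some older cores
 * must not access banked registers in the instruction immediately
 * following an LDM that transfers the user bank.  "movs pc, lr" then
 * returns to user space while copying spsr_svc back into the CPSR.
 */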

/*
 * OK, we need to do extra processing: enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
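	@ schedule() returns here and we fall straight through into
	@ ret_slow_syscall below, which re-reads TI_FLAGS, so pending
	@ work keeps being re-checked until none remains.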
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	@ slow_restore_user_regs
	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
	ldr	lr, [sp, #S_PC]!		@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
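/*
 * schedule_tail() completes the bookkeeping for the task we just
 * switched away from; it must run once in the child before anything
 * else.  "why" is set to 1 because this is a genuine syscall return,
 * and if the child is being traced we report a syscall-exit event
 * before taking the normal slow return path.
 */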
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
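
/*
 * calls.S is included twice.  The first pass above defines CALL() so
 * that each entry just increments NR_syscalls, yielding the table
 * size as an assemble-time constant; CALL() is then redefined so that
 * the later includes (sys_call_table and, with CONFIG_OABI_COMPAT,
 * sys_oabi_call_table) emit the actual function pointers.
 */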

#ifdef CONFIG_FUNCTION_TRACER
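/*
 * When the kernel is built with -pg, gcc emits a call to mcount in
 * every function prologue, after the function has saved its own lr
 * at [fp, #-4].  On entry here, lr is therefore the address just past
 * the mcount call site in the instrumented function, and [fp, #-4]
 * holds that function's return address into its caller.  Subtracting
 * MCOUNT_INSN_SIZE turns lr into the address of the call site itself.
 * With CONFIG_DYNAMIC_FTRACE, the "bl ftrace_stub" instructions at
 * the mcount_call/ftrace_call sites below are patched at runtime to
 * call the active tracer instead.
 */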
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

ENTRY(ftrace_caller)
	stmdb	sp!, {r0-r3, lr}
	ldr	r1, [fp, #-4]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#else

ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2
	bne	trace
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

trace:
	ldr	r1, [fp, #-4]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2				@ call ftrace_trace_function
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#endif /* CONFIG_DYNAMIC_FTRACE */

	.globl ftrace_stub
ftrace_stub:
	mov	pc, lr

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
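
/*
 * On a SWI the core enters SVC mode with IRQs masked, lr_svc holding
 * the address of the instruction after the SWI and spsr_svc holding
 * the caller's CPSR.  vector_swi below builds a struct pt_regs on the
 * SVC stack from that state before dispatching the call.
 */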

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif
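
/*
 * The A710() macro wraps instructions that are only needed to work
 * around an ARM710 erratum which can raise a spurious SWI exception.
 * The users below fetch the instruction at lr - 4 and verify that
 * bits 24-27 of the opcode really encode a SWI; if they don't,
 * .Larm710bug restores the saved registers and re-executes the
 * faulting instruction via "subs pc, lr, #4".
 */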

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
	add	r8, sp, #S_PC
	stmdb	r8, {sp, lr}^			@ Calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */
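
	/*
	 * Two conventions exist: EABI callers pass the number in r7
	 * (aliased as scno) and issue "swi 0", while old-ABI callers
	 * encode __NR_SYSCALL_BASE plus the number in the 24-bit
	 * immediate of the SWI instruction itself, which has to be
	 * loaded back from lr - 4.  The #if maze below handles the
	 * combinations the kernel configuration requires.
	 */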

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly Thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
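
	@ Note: "bics" both strips the SWI opcode bits and sets the
	@ condition flags; a zero result means "swi 0", i.e. an EABI
	@ call, so the ...ne instructions are skipped and the EABI
	@ table stays selected.  The eor folds __NR_OABI_SYSCALL_BASE
	@ out of the embedded immediate, leaving a plain table index.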

	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)
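
/*
 * Dispatch is a single indexed load: when scno is below NR_syscalls,
 * "ldrcc pc, [tbl, scno, lsl #2]" jumps through the table with lr
 * already pointing at ret_fast_syscall.  Numbers at or above
 * __ARM_NR_BASE are ARM-private calls handled by arm_syscall();
 * anything else ends up in sys_ni_syscall (-ENOSYS).
 */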

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)
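
/*
 * sys_syscall implements the old "indirect" syscall(2) entry point:
 * r0 carries the syscall number and r1-r4 carry what should have
 * been r0-r3, so for a valid number every argument shifts down one
 * register and the fifth and sixth arguments (r5, r6) drop into the
 * two stack slots that normally hold r4 and r5.  __NR_syscall itself
 * is rejected so that sys_syscall cannot recurse into itself.
 *
 * Most of the wrappers that follow exist only to pass a pointer to
 * the saved registers (sp + S_OFF) as an extra argument to the C
 * implementation.
 */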

sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigsuspend_wrapper:
		add	r3, sp, #S_OFF
		b	sys_sigsuspend
ENDPROC(sys_sigsuspend_wrapper)

sys_rt_sigsuspend_wrapper:
		add	r2, sp, #S_OFF
		b	sys_rt_sigsuspend
ENDPROC(sys_rt_sigsuspend_wrapper)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
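
/*
 * The statfs64 wrappers fix up the size argument: the kernel's ARM
 * definition of struct statfs64 is packed to 84 bytes, while user
 * space built with EABI structure alignment typically passes
 * sizeof == 88, so 88 is mapped back to 84 before sys_statfs64()
 * checks it.
 */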

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset because it is not page-aligned, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	do_mmap2
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	do_mmap2
#endif
ENDPROC(sys_mmap2)
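
/*
 * With 4K pages (PAGE_SHIFT == 12) the 4K-unit offset already is a
 * page offset and goes straight into the sixth argument slot.  With
 * larger pages, PGOFF_MASK covers the low bits that would be lost by
 * the shift: only offsets describing a whole number of pages can be
 * converted with "lsr #(PAGE_SHIFT - 12)"; anything else fails with
 * -EINVAL.
 */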

ENTRY(pabort_ifar)
		mrc	p15, 0, r0, cr6, cr0, 2	@ r0 = IFAR (faulting address)
ENTRY(pabort_noifar)
		mov	pc, lr
ENDPROC(pabort_ifar)
ENDPROC(pabort_noifar)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
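/*
 * The EABI requires 64-bit arguments to be passed in an even/odd
 * register pair, while the old ABI packed them into the next
 * available registers.  Each wrapper below therefore shifts the OABI
 * argument layout into the positions the common EABI sys_*
 * implementation expects.
 */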

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif
