xref: /linux/arch/arm/kernel/entry-common.S (revision 606b2f490fb80e55d05cf0e6cec0b6c0ff0fc18f)
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
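/*
 * Note that calls.S is included twice.  The first pass above defines
 * CALL() to do nothing but bump NR_syscalls, so the include merely
 * counts the entries; the second include (in the sys_call_table
 * declaration further down) redefines CALL() to emit one table slot
 * per entry.  As a rough sketch, assuming calls.S contained
 * hypothetical entries such as
 *
 *	CALL(sys_restart_syscall)
 *	CALL(sys_exit)
 *
 * the first include would expand to
 *
 *	.equ NR_syscalls,NR_syscalls+1
 *	.equ NR_syscalls,NR_syscalls+1
 *
 * while the second include would expand to
 *
 *	.long sys_restart_syscall
 *	.long sys_exit
 */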

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 */
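
/*
 * Putting the two older-GCC fragments above together, a hypothetical
 * instrumented function would therefore begin roughly like this (an
 * illustrative sketch only, not code emitted by this file):
 *
 * my_func:
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *	bl	mcount			@ lr = address inside my_func,
 *					@ [fp, #-4] = my_func's caller
 *	...
 *
 * which is why the mcount implementations below can recover the
 * caller's address from [fp, #-4].
 */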
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

ENTRY(ftrace_caller)
	stmdb	sp!, {r0-r3, lr}
	ldr	r1, [fp, #-4]
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#else

ENTRY(__gnu_mcount_nc)
	stmdb	sp!, {r0-r3, lr}
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2				@ is a tracer registered?
	bne	gnu_trace
	ldmia	sp!, {r0-r3, ip, lr}		@ restore regs and original lr; return addr in ip
	mov	pc, ip				@ return to instrumented routine

gnu_trace:
	ldr	r1, [sp, #20]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2				@ call ftrace_trace_function
	ldmia	sp!, {r0-r3, ip, lr}		@ restore regs and original lr; return addr in ip
	mov	pc, ip				@ return to instrumented routine

ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2				@ is a tracer registered?
	bne	trace
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

trace:
	ldr	r1, [fp, #-4]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2				@ call ftrace_trace_function
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#endif /* CONFIG_DYNAMIC_FTRACE */

	.globl ftrace_stub
ftrace_stub:
	mov	pc, lr

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
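	/*
	 * For reference, the two user space calling conventions look
	 * roughly like this (an illustrative sketch; the actual numeric
	 * values come from asm/unistd.h, not from this file):
	 *
	 * EABI:  the syscall number is passed in r7 and the svc/swi
	 *        immediate is always zero:
	 *
	 *		mov	r7, #nr		@ nr = hypothetical syscall number
	 *		svc	#0
	 *
	 * OABI:  the number is encoded in the instruction itself, offset
	 *        by __NR_OABI_SYSCALL_BASE:
	 *
	 *		swi	#(__NR_OABI_SYSCALL_BASE + nr)
	 *
	 * So with OABI compatibility enabled we must fetch the swi
	 * instruction word to tell the two apart.
	 */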
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
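
	/*
	 * Worked example of the decode above (illustrative only; the
	 * constants live in asm/unistd.h): an OABI "swi #(0x900000 + 4)"
	 * traps with the instruction word 0xef900004.  bics clears the
	 * 0xef condition/opcode byte leaving 0x900004, which is non-zero,
	 * so the eor with __NR_OABI_SYSCALL_BASE yields scno = 4 and tbl
	 * is switched to sys_oabi_call_table.  An EABI "svc #0" leaves
	 * zero after the bics, so scno stays in r7 and the native table
	 * is used.
	 */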

	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)
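
	/*
	 * Out-of-range numbers end up above: numbers at or beyond
	 * __ARM_NR_BASE are ARM-private calls handled by arm_syscall()
	 * (cacheflush and friends), anything else goes to sys_ni_syscall.
	 * A rough user space sketch of one such private call, assuming an
	 * EABI caller (the exact number comes from asm/unistd.h):
	 *
	 *	ldr	r7, =__ARM_NR_cacheflush
	 *	@ r0 = start, r1 = end, r2 = flags (must be 0)
	 *	svc	#0
	 */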

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
		teq	r1, #88			@ user space may pass the EABI-padded size (88)
		moveq	r1, #84			@ convert to the packed size the syscall expects
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88			@ user space may pass the EABI-padded size (88)
		moveq	r1, #84			@ convert to the packed size the syscall expects
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the requested
 * offset, we return -EINVAL.
 */
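/*
 * Worked example (assuming a hypothetical PAGE_SHIFT of 14, i.e. 16K
 * pages): PGOFF_MASK is then 3, so the offset passed in 4K units must
 * be a multiple of 4.  A valid value such as 8 (32K) is shifted right
 * by PAGE_SHIFT - 12 = 2 to give an offset of 2 pages, while an
 * unaligned value such as 5 fails the tst and returns -EINVAL.
 */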
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences: the old ABI
 * does not 8-byte align 64-bit arguments, so they arrive in different
 * registers than the in-kernel C functions expect and have to be
 * shuffled into place here.
 */
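
/*
 * For example (a sketch of the intended layouts, following the usual
 * EABI rules rather than anything defined in this file), for
 * pread64(fd, buf, count, pos):
 *
 *   EABI:	r0 = fd, r1 = buf, r2 = count, r4:r5 = pos
 *		(r3 is skipped so the 64-bit pos starts on an even
 *		register; r4 and r5 were already pushed as args 5 and 6
 *		in vector_swi)
 *
 *   OABI:	r0 = fd, r1 = buf, r2 = count, r3:r4 = pos
 *		so the wrapper below stores r3 and r4 into the stacked
 *		argument slots before branching to sys_pread64.
 */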

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif