xref: /linux/arch/arm/kernel/entry-common.S (revision 173d6681380aa1d60dfc35ed7178bd7811ba2784)
1/*
2 *  linux/arch/arm/kernel/entry-common.S
3 *
4 *  Copyright (C) 2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <asm/unistd.h>
12
13#include "entry-header.S"
14
15
16	.align	5
17/*
18 * This is the fast syscall return path.  We do as little as
19 * possible here, and this includes saving r0 back into the SVC
20 * stack.
21 */
22ret_fast_syscall:
23	disable_irq				@ disable interrupts
24	ldr	r1, [tsk, #TI_FLAGS]
25	tst	r1, #_TIF_WORK_MASK
26	bne	fast_work_pending
27
28	@ fast_restore_user_regs
	@ r0 still holds the syscall return value here, so it is
	@ deliberately excluded from the ldmdb register list below.
29	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
30	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc (writeback: sp -> &regs->pc)
31	msr	spsr_cxsf, r1			@ save in spsr_svc
32	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr (user bank)
33	mov	r0, r0				@ nop: no banked reg access in the insn after ldm {..}^
34	add	sp, sp, #S_FRAME_SIZE - S_PC	@ discard the rest of the frame
35	movs	pc, lr				@ return & move spsr_svc into cpsr
36
37/*
38 * Ok, we need to do extra processing, enter the slow path.
39 *
40 * On entry: r1 = thread_info flags (TI_FLAGS), why = syscall indicator.
41 */
42fast_work_pending:
43	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0 (writeback: sp -> &regs->r0)
44work_pending:
45	tst	r1, #_TIF_NEED_RESCHED
46	bne	work_resched
47	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
48	beq	no_work_pending
49	mov	r0, sp				@ 'regs'
50	mov	r2, why				@ 'syscall'
51	bl	do_notify_resume
52	b	ret_slow_syscall		@ Check work again
51
52work_resched:
53	bl	schedule			@ reschedule, then fall through to recheck flags
54/*
55 * "slow" syscall return path.  "why" tells us if this was a real syscall.
56 */
57ENTRY(ret_to_user)
58ret_slow_syscall:
59	disable_irq				@ disable interrupts
60	ldr	r1, [tsk, #TI_FLAGS]
61	tst	r1, #_TIF_WORK_MASK
62	bne	work_pending
63no_work_pending:
64	@ slow_restore_user_regs
65	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
66	ldr	lr, [sp, #S_PC]!		@ get pc (writeback: sp -> &regs->pc)
67	msr	spsr_cxsf, r1			@ save in spsr_svc
68	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr (user bank)
69	mov	r0, r0				@ nop: no banked reg access in the insn after ldm {..}^
70	add	sp, sp, #S_FRAME_SIZE - S_PC	@ discard the rest of the frame
71	movs	pc, lr				@ return & move spsr_svc into cpsr
72
73/*
74 * This is how we return from a fork.  schedule_tail() finishes the
75 * context switch bookkeeping for the new child before it drops to user.
76 */
77ENTRY(ret_from_fork)
78	bl	schedule_tail
79	get_thread_info tsk
80	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
81	mov	why, #1				@ this counts as a real syscall return
82	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
83	beq	ret_slow_syscall
84	mov	r1, sp				@ regs
85	mov	r0, #1				@ trace exit [IP = 1]
86	bl	syscall_trace
87	b	ret_slow_syscall
87
88
	/*
	 * Count the syscall table entries: include calls.S once with
	 * CALL() expanding to an increment of NR_syscalls, then redefine
	 * CALL() so the second inclusion (sys_call_table below) emits the
	 * actual function pointers.
	 */
89	.equ NR_syscalls,0
90#define CALL(x) .equ NR_syscalls,NR_syscalls+1
91#include "calls.S"
92#undef CALL
93#define CALL(x) .long x
94
95/*=============================================================================
96 * SWI handler
97 *-----------------------------------------------------------------------------
98 */
99
100	/* If we're optimising for StrongARM the resulting code won't
101	   run on an ARM7 and we can save a couple of instructions.
102								--pb */
103#ifdef CONFIG_CPU_ARM710
104#define A710(code...) code
	/*
	 * ARM710 bug path: the word fetched at lr-4 was not a SWI.
	 * Restore the full user register set, drop the stack frame and
	 * return to lr-4 so the instruction is executed again;
	 * "subs pc, lr, #4" also restores cpsr from spsr_svc.
	 */
105.Larm710bug:
106	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr (user bank)
107	mov	r0, r0				@ nop: no banked reg access in the insn after ldm {..}^
108	add	sp, sp, #S_FRAME_SIZE
109	subs	pc, lr, #4
110#else
111#define A710(code...)
112#endif
113
114	.align	5
/*
 * SWI (system call) entry.
 *
 * On entry from user mode via the SWI vector:
 *   lr   = address of the instruction after the SWI
 *   spsr = caller's CPSR
 * A full register frame (S_FRAME_SIZE) is built on the SVC stack, the
 * syscall number is extracted per the configured ABI(s), and the handler
 * is dispatched through the syscall table (tbl) indexed by scno.
 */
115ENTRY(vector_swi)
116	sub	sp, sp, #S_FRAME_SIZE
117	stmia	sp, {r0 - r12}			@ Calling r0 - r12
118	add	r8, sp, #S_PC
119	stmdb	r8, {sp, lr}^			@ Calling sp, lr (user bank)
120	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
121	str	lr, [sp, #S_PC]			@ Save calling PC
122	str	r8, [sp, #S_PSR]		@ Save CPSR
123	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
124	zero_fp
125
126	/*
127	 * Get the system call number.
128	 */
129
130#if defined(CONFIG_OABI_COMPAT)
131
132	/*
133	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
134	 * value to determine if it is an EABI or an old ABI call.
135	 */
136#ifdef CONFIG_ARM_THUMB
137	tst	r8, #PSR_T_BIT			@ caller in Thumb state?
138	movne	r10, #0				@ no thumb OABI emulation
139	ldreq	r10, [lr, #-4]			@ get SWI instruction
140#else
141	ldr	r10, [lr, #-4]			@ get SWI instruction
142  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
143  A710(	teq	ip, #0x0f000000						)
144  A710(	bne	.Larm710bug						)
145#endif
146
147#elif defined(CONFIG_AEABI)
148
149	/*
150	 * Pure EABI user space always put syscall number into scno (r7).
151	 */
152  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
153  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
154  A710(	teq	ip, #0x0f000000						)
155  A710(	bne	.Larm710bug						)
156
157#elif defined(CONFIG_ARM_THUMB)
158
159	/* Legacy ABI only, possibly thumb mode. */
160	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
161	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
162	ldreq	scno, [lr, #-4]			@ ARM state: number is in the SWI immediate
163
164#else
165
166	/* Legacy ABI only. */
167	ldr	scno, [lr, #-4]			@ get SWI instruction
168  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
169  A710(	teq	ip, #0x0f000000						)
170  A710(	bne	.Larm710bug						)
171
172#endif
173
174#ifdef CONFIG_ALIGNMENT_TRAP
175	ldr	ip, __cr_alignment		@ &cr_alignment (via literal below)
176	ldr	ip, [ip]
177	mcr	p15, 0, ip, c1, c0		@ update control register
178#endif
179	enable_irq
180
181	get_thread_info tsk
182	adr	tbl, sys_call_table		@ load syscall table pointer
183	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
184
185#if defined(CONFIG_OABI_COMPAT)
186	/*
187	 * If the swi argument is zero, this is an EABI call and we do nothing.
188	 *
189	 * If this is an old ABI call, get the syscall number into scno and
190	 * get the old ABI syscall table address.
191	 */
192	bics	r10, r10, #0xff000000		@ strip SWI op-code; Z set => EABI
193	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
194	ldrne	tbl, =sys_oabi_call_table
195#elif !defined(CONFIG_AEABI)
196	bic	scno, scno, #0xff000000		@ mask off SWI op-code
197	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
198#endif
199
200	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
201	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
202	bne	__sys_trace
203
204	cmp	scno, #NR_syscalls		@ check upper syscall limit
205	adr	lr, ret_fast_syscall		@ return address
206	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
207
	@ Fall through: scno >= NR_syscalls.  Either a private ARM
	@ syscall (>= __ARM_NR_BASE) handled by arm_syscall(), or ENOSYS.
208	add	r1, sp, #S_OFF
2092:	mov	why, #0				@ no longer a real syscall
210	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
211	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
212	bcs	arm_syscall
213	b	sys_ni_syscall			@ not private func
214
215	/*
216	 * This is the really slow path.  We're going to be doing
217	 * context switches, and waiting for our parent to respond.
218	 * syscall_trace() may rewrite the syscall number, so it is
219	 * reloaded from r0 and range-checked again before dispatch.
220	 */
221__sys_trace:
222	mov	r2, scno			@ syscall number
223	add	r1, sp, #S_OFF			@ pointer to regs
224	mov	r0, #0				@ trace entry [IP = 0]
225	bl	syscall_trace
226
227	adr	lr, __sys_trace_return		@ return address
228	mov	scno, r0			@ syscall number (possibly new)
229	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
230	cmp	scno, #NR_syscalls		@ check upper syscall limit
231	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
232	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
233	b	2b				@ out of range: private/NI handling
232
	@ Return path for traced syscalls: store the return value into the
	@ saved frame, report syscall exit to the tracer, then take the
	@ normal slow return path.
233__sys_trace_return:
234	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0 (writeback: sp -> &regs->r0)
235	mov	r2, scno			@ syscall number
236	mov	r1, sp				@ regs
237	mov	r0, #1				@ trace exit [IP = 1]
238	bl	syscall_trace
239	b	ret_slow_syscall
240
241	.align	5
242#ifdef CONFIG_ALIGNMENT_TRAP
	@ Literal holding the address of cr_alignment, read by vector_swi
	@ to refresh the CP15 control register on each syscall entry.
243	.type	__cr_alignment, #object
244__cr_alignment:
245	.word	cr_alignment
246#endif
247	.ltorg					@ dump the literal pool here
248
249/*
250 * This is the syscall table declaration for native ABI syscalls.
251 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
252 * calls.S is included a second time here; CALL(x) now expands to
253 * ".long x" (see above), so each entry becomes a function pointer.
254 */
255#define ABI(native, compat) native
256#ifdef CONFIG_AEABI
257#define OBSOLETE(syscall) sys_ni_syscall
258#else
259#define OBSOLETE(syscall) syscall
260#endif
261
262	.type	sys_call_table, #object
263ENTRY(sys_call_table)
264#include "calls.S"
265#undef ABI
266#undef OBSOLETE
265
266/*============================================================================
267 * Special system call wrappers
268 *
269 * sys_syscall: indirect syscall — the real syscall number arrives in r0
270 * and the real arguments are shifted up one register/stack slot.
271 * Rejects a recursive sys_syscall and out-of-range numbers with ENOSYS.
272 */
273@ r0 = syscall number
274@ r8 = syscall table
275		.type	sys_syscall, #function
276sys_syscall:
277		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
278		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
279		cmpne	scno, #NR_syscalls	@ check range
280		stmloia	sp, {r5, r6}		@ shuffle args: r5/r6 become 5th/6th
281		movlo	r0, r1
282		movlo	r1, r2
283		movlo	r2, r3
284		movlo	r3, r4
285		ldrlo	pc, [tbl, scno, lsl #2]
286		b	sys_ni_syscall
283
	@ The *_wrapper routines below supply arguments the C handlers need
	@ but which user space does not pass: usually a struct pt_regs
	@ pointer (sp + S_OFF points at the saved frame).
284sys_fork_wrapper:
285		add	r0, sp, #S_OFF		@ r0 = regs
286		b	sys_fork
287
288sys_vfork_wrapper:
289		add	r0, sp, #S_OFF		@ r0 = regs
290		b	sys_vfork
291
292sys_execve_wrapper:
293		add	r3, sp, #S_OFF		@ 4th arg = regs
294		b	sys_execve
295
296sys_clone_wrapper:
297		add	ip, sp, #S_OFF
298		str	ip, [sp, #4]		@ pass regs as 6th (stacked) arg
299		b	sys_clone
300
301sys_sigsuspend_wrapper:
302		add	r3, sp, #S_OFF		@ 4th arg = regs
303		b	sys_sigsuspend
304
305sys_rt_sigsuspend_wrapper:
306		add	r2, sp, #S_OFF		@ 3rd arg = regs
307		b	sys_rt_sigsuspend
308
309sys_sigreturn_wrapper:
310		add	r0, sp, #S_OFF		@ r0 = regs
311		b	sys_sigreturn
312
313sys_rt_sigreturn_wrapper:
314		add	r0, sp, #S_OFF		@ r0 = regs
315		b	sys_rt_sigreturn
316
317sys_sigaltstack_wrapper:
318		ldr	r2, [sp, #S_OFF + S_SP]	@ 3rd arg = user sp at syscall entry
319		b	do_sigaltstack
320
	@ statfs64/fstatfs64: accept a size of 88 (padded struct layout)
	@ by rewriting it to the 84 the handler expects.
321sys_statfs64_wrapper:
322		teq	r1, #88
323		moveq	r1, #84
324		b	sys_statfs64
325
326sys_fstatfs64_wrapper:
327		teq	r1, #88
328		moveq	r1, #84
329		b	sys_fstatfs64
330
331/*
332 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
333 * offset, we return EINVAL.  do_mmap2() takes the offset in PAGE_SIZE
334 * units, passed as the 6th (stacked) argument.
335 */
336sys_mmap2:
337#if PAGE_SHIFT > 12
338		tst	r5, #PGOFF_MASK		@ offset representable in pages?
339		moveq	r5, r5, lsr #PAGE_SHIFT - 12	@ convert 4K units -> pages
340		streq	r5, [sp, #4]		@ pass as 6th arg
341		beq	do_mmap2
342		mov	r0, #-EINVAL
343		mov	pc, lr
344#else
		@ PAGE_SIZE == 4K: units already match, no conversion needed
345		str	r5, [sp, #4]		@ pass as 6th arg
346		b	do_mmap2
347#endif
347
348#ifdef CONFIG_OABI_COMPAT
349
350/*
351 * These are syscalls with argument register differences: the old ABI
352 * passes 64-bit arguments in different registers (no even/odd register
353 * pair alignment), so shuffle them into the EABI positions.
354 */
355
356sys_oabi_pread64:
357		stmia	sp, {r3, r4}		@ 64-bit pos: r3/r4 -> stacked args
358		b	sys_pread64
359
360sys_oabi_pwrite64:
361		stmia	sp, {r3, r4}		@ 64-bit count/pos -> stacked args
362		b	sys_pwrite64
363
364sys_oabi_truncate64:
365		mov	r3, r2			@ 64-bit length: r1/r2 -> r2/r3
366		mov	r2, r1
367		b	sys_truncate64
368
369sys_oabi_ftruncate64:
370		mov	r3, r2			@ 64-bit length: r1/r2 -> r2/r3
371		mov	r2, r1
372		b	sys_ftruncate64
373
374sys_oabi_readahead:
375		str	r3, [sp]		@ count -> stacked arg
376		mov	r3, r2			@ 64-bit offset: r1/r2 -> r2/r3
377		mov	r2, r1
378		b	sys_readahead
379
380/*
381 * Let's declare a second syscall table for old ABI binaries
382 * using the compatibility syscall entries.
383 */
384#define ABI(native, compat) compat
385#define OBSOLETE(syscall) syscall
386
387	.type	sys_oabi_call_table, #object
388ENTRY(sys_oabi_call_table)
389#include "calls.S"
390#undef ABI
391#undef OBSOLETE
392
393#endif
392
393