/* xref: /linux/arch/arm/kernel/entry-common.S (revision de2fe5e07d58424bc286fff3fd3c1b0bf933cd58) */
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>

#include <asm/unistd.h>

#include "entry-header.S"

16
17	.align	5
18/*
19 * This is the fast syscall return path.  We do as little as
20 * possible here, and this includes saving r0 back into the SVC
21 * stack.
22 */
23ret_fast_syscall:
24	disable_irq				@ disable interrupts
25	ldr	r1, [tsk, #TI_FLAGS]
26	tst	r1, #_TIF_WORK_MASK
27	bne	fast_work_pending
28
29	@ fast_restore_user_regs
30	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
31	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
32	msr	spsr_cxsf, r1			@ save in spsr_svc
33	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
34	mov	r0, r0
35	add	sp, sp, #S_FRAME_SIZE - S_PC
36	movs	pc, lr				@ return & move spsr_svc into cpsr
37
38/*
39 * Ok, we need to do extra processing, enter the slow path.
40 */
41fast_work_pending:
42	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
43work_pending:
44	tst	r1, #_TIF_NEED_RESCHED
45	bne	work_resched
46	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
47	beq	no_work_pending
48	mov	r0, sp				@ 'regs'
49	mov	r2, why				@ 'syscall'
50	bl	do_notify_resume
51	b	ret_slow_syscall		@ Check work again
52
53work_resched:
54	bl	schedule
55/*
56 * "slow" syscall return path.  "why" tells us if this was a real syscall.
57 */
58ENTRY(ret_to_user)
59ret_slow_syscall:
60	disable_irq				@ disable interrupts
61	ldr	r1, [tsk, #TI_FLAGS]
62	tst	r1, #_TIF_WORK_MASK
63	bne	work_pending
64no_work_pending:
65	@ slow_restore_user_regs
66	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
67	ldr	lr, [sp, #S_PC]!		@ get pc
68	msr	spsr_cxsf, r1			@ save in spsr_svc
69	ldmdb	sp, {r0 - lr}^			@ get calling r1 - lr
70	mov	r0, r0
71	add	sp, sp, #S_FRAME_SIZE - S_PC
72	movs	pc, lr				@ return & move spsr_svc into cpsr
73
74/*
75 * This is how we return from a fork.
76 */
77ENTRY(ret_from_fork)
78	bl	schedule_tail
79	get_thread_info tsk
80	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
81	mov	why, #1
82	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
83	beq	ret_slow_syscall
84	mov	r1, sp
85	mov	r0, #1				@ trace exit [IP = 1]
86	bl	syscall_trace
87	b	ret_slow_syscall
88
89
90	.equ NR_syscalls,0
91#define CALL(x) .equ NR_syscalls,NR_syscalls+1
92#include "calls.S"
93#undef CALL
94#define CALL(x) .long x
95
96/*=============================================================================
97 * SWI handler
98 *-----------------------------------------------------------------------------
99 */
100
101	/* If we're optimising for StrongARM the resulting code won't
102	   run on an ARM7 and we can save a couple of instructions.
103								--pb */
104#ifdef CONFIG_CPU_ARM710
105#define A710(code...) code
106.Larm710bug:
107	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
108	mov	r0, r0
109	add	sp, sp, #S_FRAME_SIZE
110	subs	pc, lr, #4
111#else
112#define A710(code...)
113#endif
114
115	.align	5
116ENTRY(vector_swi)
117	sub	sp, sp, #S_FRAME_SIZE
118	stmia	sp, {r0 - r12}			@ Calling r0 - r12
119	add	r8, sp, #S_PC
120	stmdb	r8, {sp, lr}^			@ Calling sp, lr
121	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
122	str	lr, [sp, #S_PC]			@ Save calling PC
123	str	r8, [sp, #S_PSR]		@ Save CPSR
124	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
125	zero_fp
126
127	/*
128	 * Get the system call number.
129	 */
130
131#if defined(CONFIG_OABI_COMPAT)
132
133	/*
134	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
135	 * value to determine if it is an EABI or an old ABI call.
136	 */
137#ifdef CONFIG_ARM_THUMB
138	tst	r8, #PSR_T_BIT
139	movne	r10, #0				@ no thumb OABI emulation
140	ldreq	r10, [lr, #-4]			@ get SWI instruction
141#else
142	ldr	r10, [lr, #-4]			@ get SWI instruction
143  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
144  A710(	teq	ip, #0x0f000000						)
145  A710(	bne	.Larm710bug						)
146#endif
147
148#elif defined(CONFIG_AEABI)
149
150	/*
151	 * Pure EABI user space always put syscall number into scno (r7).
152	 */
153  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
154  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
155  A710(	teq	ip, #0x0f000000						)
156  A710(	bne	.Larm710bug						)
157
158#elif defined(CONFIG_ARM_THUMB)
159
160	/* Legacy ABI only, possibly thumb mode. */
161	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
162	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
163	ldreq	scno, [lr, #-4]
164
165#else
166
167	/* Legacy ABI only. */
168	ldr	scno, [lr, #-4]			@ get SWI instruction
169  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
170  A710(	teq	ip, #0x0f000000						)
171  A710(	bne	.Larm710bug						)
172
173#endif
174
175#ifdef CONFIG_ALIGNMENT_TRAP
176	ldr	ip, __cr_alignment
177	ldr	ip, [ip]
178	mcr	p15, 0, ip, c1, c0		@ update control register
179#endif
180	enable_irq
181
182	get_thread_info tsk
183	adr	tbl, sys_call_table		@ load syscall table pointer
184	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
185
186#if defined(CONFIG_OABI_COMPAT)
187	/*
188	 * If the swi argument is zero, this is an EABI call and we do nothing.
189	 *
190	 * If this is an old ABI call, get the syscall number into scno and
191	 * get the old ABI syscall table address.
192	 */
193	bics	r10, r10, #0xff000000
194	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
195	ldrne	tbl, =sys_oabi_call_table
196#elif !defined(CONFIG_AEABI)
197	bic	scno, scno, #0xff000000		@ mask off SWI op-code
198	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
199#endif
200
201	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
202	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
203	bne	__sys_trace
204
205	cmp	scno, #NR_syscalls		@ check upper syscall limit
206	adr	lr, ret_fast_syscall		@ return address
207	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
208
209	add	r1, sp, #S_OFF
2102:	mov	why, #0				@ no longer a real syscall
211	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
212	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
213	bcs	arm_syscall
214	b	sys_ni_syscall			@ not private func
215
216	/*
217	 * This is the really slow path.  We're going to be doing
218	 * context switches, and waiting for our parent to respond.
219	 */
220__sys_trace:
221	mov	r2, scno
222	add	r1, sp, #S_OFF
223	mov	r0, #0				@ trace entry [IP = 0]
224	bl	syscall_trace
225
226	adr	lr, __sys_trace_return		@ return address
227	mov	scno, r0			@ syscall number (possibly new)
228	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
229	cmp	scno, #NR_syscalls		@ check upper syscall limit
230	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
231	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
232	b	2b
233
234__sys_trace_return:
235	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
236	mov	r2, scno
237	mov	r1, sp
238	mov	r0, #1				@ trace exit [IP = 1]
239	bl	syscall_trace
240	b	ret_slow_syscall
241
242	.align	5
243#ifdef CONFIG_ALIGNMENT_TRAP
244	.type	__cr_alignment, #object
245__cr_alignment:
246	.word	cr_alignment
247#endif
248	.ltorg
249
250/*
251 * This is the syscall table declaration for native ABI syscalls.
252 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
253 */
254#define ABI(native, compat) native
255#ifdef CONFIG_AEABI
256#define OBSOLETE(syscall) sys_ni_syscall
257#else
258#define OBSOLETE(syscall) syscall
259#endif
260
261	.type	sys_call_table, #object
262ENTRY(sys_call_table)
263#include "calls.S"
264#undef ABI
265#undef OBSOLETE
266
267/*============================================================================
268 * Special system call wrappers
269 */
270@ r0 = syscall number
271@ r8 = syscall table
272		.type	sys_syscall, #function
273sys_syscall:
274		eor	scno, r0, #__NR_OABI_SYSCALL_BASE
275		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
276		cmpne	scno, #NR_syscalls	@ check range
277		stmloia	sp, {r5, r6}		@ shuffle args
278		movlo	r0, r1
279		movlo	r1, r2
280		movlo	r2, r3
281		movlo	r3, r4
282		ldrlo	pc, [tbl, scno, lsl #2]
283		b	sys_ni_syscall
284
285sys_fork_wrapper:
286		add	r0, sp, #S_OFF
287		b	sys_fork
288
289sys_vfork_wrapper:
290		add	r0, sp, #S_OFF
291		b	sys_vfork
292
293sys_execve_wrapper:
294		add	r3, sp, #S_OFF
295		b	sys_execve
296
297sys_clone_wrapper:
298		add	ip, sp, #S_OFF
299		str	ip, [sp, #4]
300		b	sys_clone
301
302sys_sigsuspend_wrapper:
303		add	r3, sp, #S_OFF
304		b	sys_sigsuspend
305
306sys_rt_sigsuspend_wrapper:
307		add	r2, sp, #S_OFF
308		b	sys_rt_sigsuspend
309
310sys_sigreturn_wrapper:
311		add	r0, sp, #S_OFF
312		b	sys_sigreturn
313
314sys_rt_sigreturn_wrapper:
315		add	r0, sp, #S_OFF
316		b	sys_rt_sigreturn
317
318sys_sigaltstack_wrapper:
319		ldr	r2, [sp, #S_OFF + S_SP]
320		b	do_sigaltstack
321
322sys_statfs64_wrapper:
323		teq	r1, #88
324		moveq	r1, #84
325		b	sys_statfs64
326
327sys_fstatfs64_wrapper:
328		teq	r1, #88
329		moveq	r1, #84
330		b	sys_fstatfs64
331
332/*
333 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
334 * offset, we return EINVAL.
335 */
336sys_mmap2:
337#if PAGE_SHIFT > 12
338		tst	r5, #PGOFF_MASK
339		moveq	r5, r5, lsr #PAGE_SHIFT - 12
340		streq	r5, [sp, #4]
341		beq	do_mmap2
342		mov	r0, #-EINVAL
343		RETINSTR(mov,pc, lr)
344#else
345		str	r5, [sp, #4]
346		b	do_mmap2
347#endif
348
349#ifdef CONFIG_OABI_COMPAT
350
351/*
352 * These are syscalls with argument register differences
353 */
354
355sys_oabi_pread64:
356		stmia	sp, {r3, r4}
357		b	sys_pread64
358
359sys_oabi_pwrite64:
360		stmia	sp, {r3, r4}
361		b	sys_pwrite64
362
363sys_oabi_truncate64:
364		mov	r3, r2
365		mov	r2, r1
366		b	sys_truncate64
367
368sys_oabi_ftruncate64:
369		mov	r3, r2
370		mov	r2, r1
371		b	sys_ftruncate64
372
373sys_oabi_readahead:
374		str	r3, [sp]
375		mov	r3, r2
376		mov	r2, r1
377		b	sys_readahead
378
379/*
380 * Let's declare a second syscall table for old ABI binaries
381 * using the compatibility syscall entries.
382 */
383#define ABI(native, compat) compat
384#define OBSOLETE(syscall) syscall
385
386	.type	sys_oabi_call_table, #object
387ENTRY(sys_oabi_call_table)
388#include "calls.S"
389#undef ABI
390#undef OBSOLETE
391
392#endif
393
394