/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>

#include <asm/unistd.h>

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here: in particular, we avoid writing r0 back into the
 * SVC stack, since it already holds the syscall return value.
 */
ret_fast_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	@ fast_restore_user_regs
	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	mov	r0, r0				@ nop: banked registers must not be touched right after ldm ^
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
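
@ Illustrative note (not from the original source): the S_* offsets used
@ here come from ARM's struct pt_regs, which is roughly (a sketch,
@ assuming the usual <asm/ptrace.h> layout of this era):
@
@	struct pt_regs {
@		long uregs[18];		/* r0-r12, sp, lr, pc, cpsr, ORIG_r0 */
@	};
@
@ giving S_PC = 15*4, S_PSR = 16*4, S_OLD_R0 = 17*4 and S_FRAME_SIZE = 72,
@ while S_OFF covers the extra {r4, r5} pair pushed in vector_swi below.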

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ save returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again
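
@ For reference, the C hook invoked above looks roughly like this (a
@ sketch; the authoritative signature lives in arch/arm/kernel/signal.c):
@
@	asmlinkage void do_notify_resume(struct pt_regs *regs,
@					 unsigned int thread_flags,
@					 int syscall);
@
@ r0 = saved registers (sp), r1 = the TI_FLAGS value just tested, and
@ r2 = "why", non-zero when we arrived here from a real syscall.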

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	@ slow_restore_user_regs
	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
	ldr	lr, [sp, #S_PC]!		@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	mov	r0, r0				@ nop: banked registers must not be touched right after ldm ^
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
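
@ A new child lands here because copy_thread() points its saved kernel
@ context at ret_from_fork; a minimal sketch of that arrangement,
@ assuming the arch/arm/kernel/process.c conventions of this era:
@
@	thread->cpu_context.pc = (unsigned long)ret_from_fork;
@	thread->cpu_context.sp = (unsigned long)childregs;
@
@ so the first context switch into the child "returns" to the code above.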


#include "calls.S"

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0				@ nop after ldm ^
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif
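
@ Background (descriptive, not from the original): some ARM710 parts can
@ take the SWI vector spuriously, with lr pointing past an instruction
@ that is not actually a SWI.  The A710() paths below check that bits
@ 27:24 of the word at lr - 4 form a SWI encoding (0xf); if not,
@ .Larm710bug above restores the user registers and retries the faulting
@ instruction via subs pc, lr, #4.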

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
	add	r8, sp, #S_PC
	stmdb	r8, {sp, lr}^			@ Calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */
#if defined(CONFIG_AEABI)

	@ syscall number is in scno (r7) already.

  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#elif defined(CONFIG_ARM_THUMB)
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]
#else
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
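
@ Userspace view of the two ABIs handled above (an illustrative C sketch,
@ not part of this file):
@
@	/* EABI (and Thumb): number in r7, plain "swi 0" */
@	register long r7 asm("r7") = __NR_exit;
@	asm volatile("swi 0" : : "r" (r7));
@
@	/* OABI ARM: number encoded in the SWI instruction itself;
@	   __NR_SYSCALL_BASE is 0x900000, so exit() is: */
@	asm volatile("swi 0x900001");
@
@ hence the OABI path must read the instruction at lr - 4 to recover the
@ number, while the EABI path finds it in scno (r7) already.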

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
#ifndef CONFIG_AEABI
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
	adr	tbl, sys_call_table		@ load syscall table pointer
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
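
@ Out-of-range numbers end up above: anything at or beyond __ARM_NR_BASE
@ (0x0f0000 past the OS base) selects an ARM-private call such as
@ cacheflush or set_tls, dispatched in C by arm_syscall(); everything
@ else falls through to sys_ni_syscall.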

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
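
@ syscall_trace() is the C ptrace hook: r0 selects the direction
@ (0 = entry, 1 = exit) and r1 points at the saved user registers, so
@ a tracer can inspect or rewrite the arguments and the return value
@ between the two calls.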

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
		.type	sys_syscall, #function
sys_syscall:
#ifndef CONFIG_AEABI
		eor	scno, r0, #__NR_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
#endif
		b	sys_ni_syscall
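
@ sys_syscall is the OABI "indirect" syscall: the real number arrives in
@ r0 and every argument shifts down one slot.  An illustrative userspace
@ equivalent via the libc wrapper (a sketch, OABI only):
@
@	syscall(__NR_syscall, __NR_getpid);	/* same effect as getpid() */
@
@ The range checks above also reject a nested __NR_syscall, so the
@ indirection cannot recurse.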

sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve

sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
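
@ sys_clone() on ARM takes a trailing struct pt_regs * argument; the
@ fifth and sixth user arguments were pushed as {r4, r5} in vector_swi,
@ so the wrapper above drops the regs pointer into the sixth argument
@ slot at [sp, #4].  A sketch of the matching C prototype (see
@ arch/arm/kernel/sys_arm.c for the authoritative one):
@
@	asmlinkage int sys_clone(unsigned long clone_flags,
@				 unsigned long newsp,
@				 int __user *parent_tidptr, int tls_val,
@				 int __user *child_tidptr,
@				 struct pt_regs *regs);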

sys_sigsuspend_wrapper:
		add	r3, sp, #S_OFF
		b	sys_sigsuspend

sys_rt_sigsuspend_wrapper:
		add	r2, sp, #S_OFF
		b	sys_rt_sigsuspend

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_sigreturn

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_rt_sigreturn

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]	@ user sp is do_sigaltstack's third argument
		b	do_sigaltstack

sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
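
@ EABI alignment pads struct statfs64 from 84 to 88 bytes, so an EABI
@ caller passes a size of 88 where the generic C code expects 84; the
@ two wrappers above fix the size argument up accordingly.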

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	do_mmap2
		mov	r0, #-EINVAL
		RETINSTR(mov,pc, lr)
#else
		str	r5, [sp, #4]
		b	do_mmap2
#endif
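
@ In C, the PAGE_SHIFT > 12 path amounts to (a sketch; do_mmap2 takes
@ the offset in whole pages as its sixth argument):
@
@	if (off_4k & PGOFF_MASK)
@		return -EINVAL;			/* not page-aligned */
@	pgoff = off_4k >> (PAGE_SHIFT - 12);	/* 4K units -> pages */
@
@ i.e. the 4K-unit offset must describe a whole number of pages.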

#ifdef CONFIG_OABI_COMPAT
/*
 * These are syscalls with argument register differences
 */

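@ The differences stem from EABI's 64-bit argument alignment: an EABI
@ kernel expects a 64-bit argument in an even/odd register pair or an
@ 8-byte aligned stack slot, while OABI packs arguments densely.  Two
@ views of pread64, for example (a sketch):
@
@	OABI:	fd = r0, buf = r1, count = r2, pos = r3+r4
@	EABI:	fd = r0, buf = r1, count = r2, r3 unused,
@		pos = [sp, #0]-[sp, #4] (the pushed {r4, r5} slots)
@
@ so the wrappers below move registers or stack words into the EABI
@ positions before branching to the common implementation.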

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead

#endif
