xref: /linux/arch/m68k/68000/entry.S (revision 60684c2bd35064043360e6f716d1b7c20e967b7d)
/*
 *  entry.S -- non-mmu 68000 interrupt and exception entry points
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl sys_call_table
.globl bad_interrupt
.globl inthandler1
.globl inthandler2
.globl inthandler3
.globl inthandler4
.globl inthandler5
.globl inthandler6
.globl inthandler7

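/*
 * badsys: reached when the system call number is out of range; store
 * -ENOSYS in the saved d0 so it is returned to user space.
 */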
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_exception

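/*
 * do_trace: system call path taken when TIF_SYSCALL_TRACE is set.
 * syscall_trace_enter() reports the entry to the tracer (e.g. strace
 * via ptrace); a return value of -1 skips the call, otherwise the
 * call is dispatched as usual and syscall_trace_leave() reports the
 * result on the way out.
 */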
do_trace:
	movel	#-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace */
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	addql	#1,%d0
	jeq	ret_from_exception
	movel	%sp@(PT_OFF_ORIG_D0),%d1
	movel	#-ENOSYS,%d0
	cmpl	#NR_syscalls,%d1
	jcc	1f
	lsl	#2,%d1
	lea	sys_call_table, %a0
	jbsr	%a0@(%d1)

1:	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	ret_from_exception

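/*
 * System call entry point.  SAVE_ALL_SYS builds the pt_regs frame,
 * set_esp0() records the top of that frame in the current thread
 * structure, and the syscall number from orig_d0 is bounds-checked
 * against NR_syscalls before being used to index sys_call_table.
 * If TIF_SYSCALL_TRACE is set, the do_trace path above is taken
 * instead.
 */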
ENTRY(system_call)
	SAVE_ALL_SYS

	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp

	movel	%sp@(PT_OFF_ORIG_D0),%d0

	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1
	movel	%d1,%a2
	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	jne	do_trace
	cmpl	#NR_syscalls,%d0
	jcc	badsys
	lsl	#2,%d0
	lea	sys_call_table,%a0
	movel	%a0@(%d0), %a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */

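/*
 * Common return path for system calls, exceptions and interrupts.
 * A return to kernel mode restores the saved frame immediately; a
 * return to user mode first lowers the interrupt mask (ALLOWINT) and
 * checks thread_info->flags for rescheduling or signal work.
 */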
ret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	/* returning to kernel mode? */
	jeq	Luser_return		/* no, check for pending user work */

Lkernel_return:
	RESTORE_ALL

Luser_return:
	/*
	 * Only allow interrupts when we are really the last one on the
	 * kernel stack, otherwise stack overflow can occur during
	 * heavy interrupt load.
	 */
	andw	#ALLOWINT,%sr

	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1
	movel	%d1,%a2
1:
	move	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
	jne	Lwork_to_do
	RESTORE_ALL

Lwork_to_do:
	movel	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

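/*
 * No reschedule needed: hand any remaining work (signal delivery,
 * notify-resume callbacks) to do_notify_resume(), then loop back to
 * re-check thread_info->flags before returning to user space.
 */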
Lsignal_return:
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrw	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	1b

/*
 * This is the main interrupt handler, responsible for calling process_int()
 */
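/*
 * inthandler1-7 service fixed vectors 65-71 (interrupt levels 1-7
 * with the vector base at 64 - an assumption based on how the
 * platform interrupt code programs the vector register); the generic
 * inthandler further below takes the vector from the saved
 * format/vector word instead.  process_int() is assumed to be the C
 * routine declared roughly as
 *
 *	asmlinkage void process_int(int vec, struct pt_regs *fp);
 *
 * which is why the pt_regs pointer is pushed first and the vector
 * number second.
 */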
inthandler1:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#65,%sp@-		/* put vector # on stack */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop parameters off stack */
	bra	ret_from_exception

inthandler2:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#66,%sp@-		/* put vector # on stack */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop parameters off stack */
	bra	ret_from_exception

inthandler3:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#67,%sp@-		/* put vector # on stack */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop parameters off stack */
	bra	ret_from_exception

inthandler4:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#68,%sp@-		/* put vector # on stack */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop parameters off stack */
	bra	ret_from_exception

inthandler5:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#69,%sp@-		/* put vector # on stack */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop parameters off stack */
	bra	ret_from_exception

inthandler6:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#70,%sp@-		/* put vector # on stack */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop parameters off stack */
	bra	ret_from_exception

inthandler7:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#71,%sp@-		/* put vector # on stack */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop parameters off stack */
	bra	ret_from_exception

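/*
 * Generic handler: the vector number passed to process_int() is taken
 * from the saved format/vector word rather than hard-coded.
 */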
inthandler:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		/* put vector # on stack */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop parameters off stack */
	bra	ret_from_exception

/*
 * Handler for uninitialized and spurious interrupts.
 */
ENTRY(bad_interrupt)
	addql	#1,irq_err_count
	rte

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
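/*
 * resume is reached from the switch_to() macro (assumed to live in
 * asm/switch_to.h for this port), which is expected to load prev into
 * a0 and next into a1 and to read the previous task back out of d1
 * after the call - hence the "save prev thread in d1" below.
 */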
ENTRY(resume)
	movel	%a0,%d1				/* save prev thread in d1 */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)	/* save sr */
	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
	movel	%usp,%a3			/* save usp */
	movel	%a3,%a0@(TASK_THREAD+THREAD_USP)

	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
	movel	%a3,%usp
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr	/* restore thread status reg */
	rts
