xref: /linux/arch/m68k/68000/entry.S (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
/* SPDX-License-Identifier: GPL-2.0-or-later
 *
 *  entry.S -- non-mmu 68000 interrupt and exception entry points
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Linux/m68k support by Hamish Macdonald
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.text

/*
 * getthreadinfo -- get the current thread_info pointer into %a2.
 *
 * thread_info sits at the base of the kernel stack, so rounding the
 * current stack pointer down to a THREAD_SIZE boundary locates it.
 * Clobbers: %d1 (scratch), %a2 (result).
 */
 .macro getthreadinfo
	movel	%sp,%d1			/* d1 = current kernel stack pointer */
	andl	#-THREAD_SIZE,%d1	/* round down to THREAD_SIZE boundary */
	movel	%d1,%a2			/* a2 = thread_info of current task */
 .endm
27
.globl system_call
.globl resume
.globl ret_from_exception
.globl sys_call_table
.globl bad_interrupt
.globl inthandler1
.globl inthandler2
.globl inthandler3
.globl inthandler4
.globl inthandler5
.globl inthandler6
.globl inthandler7

/*
 * badsys -- out-of-range system call number.
 * Store -ENOSYS into the saved %d0 slot of the pt_regs frame (the
 * value the user process will see returned), then take the common
 * exception exit path.
 */
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)	/* return value = -ENOSYS */
	jra	ret_from_exception
44
/*
 * do_trace -- system call path taken when TIF_SYSCALL_TRACE is set.
 * Gives the tracer (e.g. strace) a look at syscall entry and exit.
 */
do_trace:
	movel	#-ENOSYS,%sp@(PT_OFF_D0) /* default result, visible to tracer at entry stop */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	addql	#1,%d0			/* tracer returned -1? then skip the syscall */
	jeq	ret_from_exception
	movel	%sp@(PT_OFF_ORIG_D0),%d1	/* reload syscall number (tracer may change it) */
	movel	#-ENOSYS,%d0		/* default if number is out of range */
	cmpl	#NR_syscalls,%d1
	jcc	1f			/* unsigned >= NR_syscalls: skip dispatch */
	lsl	#2,%d1			/* scale to table offset (4-byte entries) */
	lea	sys_call_table, %a0
	jbsr	%a0@(%d1)		/* call the syscall handler */

1:	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	ret_from_exception
69
/*
 * system_call -- trap entry point for system calls.
 * The syscall number was saved in the ORIG_D0 slot of the pt_regs
 * frame by SAVE_ALL_SYS; the result goes back into the D0 slot.
 * Falls through to ret_from_exception after the handler returns.
 */
ENTRY(system_call)
	SAVE_ALL_SYS

	/* save top of frame: let the kernel find this pt_regs via set_esp0() */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp

	movel	%sp@(PT_OFF_ORIG_D0),%d0	/* d0 = syscall number */

	/* Doing a trace? Take the slow (ptrace) path if so. */
	getthreadinfo
	/* byte-wise big-endian test of TIF_SYSCALL_TRACE in thread_info->flags */
	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	jne	do_trace
	cmpl	#NR_syscalls,%d0
	jcc	badsys			/* unsigned >= NR_syscalls: bad number */
	lsl	#2,%d0			/* scale to table offset (4-byte entries) */
	lea	sys_call_table,%a0
	movel	%a0@(%d0), %a0		/* a0 = handler address */
	jbsr	%a0@
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
91
/*
 * ret_from_exception -- common exit path for syscalls, traps and IRQs.
 * Returning to kernel mode restores state immediately; returning to
 * user space first goes through the resched/signal checks below.
 */
ret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	/* S bit of saved SR: set -> was in kernel */
	jeq	Luser_return		/* bit clear -> returning to user space */

Lkernel_return:
	RESTORE_ALL			/* kernel return: skip resched/signals */
98
/*
 * Luser_return -- return-to-user path: loop until no work flags remain,
 * handling reschedule and signal delivery, then restore user state.
 */
Luser_return:
	/* only allow interrupts when we are really the last one on the */
	/* kernel stack, otherwise stack overflow can occur during      */
	/* heavy interrupt load                                         */
	andw	#ALLOWINT,%sr

	getthreadinfo			/* a2 = thread_info (clobbers d1) */
1:
	/* check if any of the flags are set (low byte, bits 0-7, big-endian) */
	moveb	%a2@(TINFO_FLAGS + 3),%d1	/* thread_info->flags (low 8 bits) */
	jne	Lwork_to_do		/* some work flag set */
	RESTORE_ALL			/* nothing pending: return to user */

Lwork_to_do:
	/* reschedule takes priority over signal delivery */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

Lsignal_return:
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	/* argument: pointer to pt_regs */
	bsrw	do_notify_resume
	addql	#4,%sp			/* pop the argument */
	RESTORE_SWITCH_STACK
	addql	#4,%sp			/* pop the dummy return address */
	jra	1b			/* re-check flags before returning */
126
/*
 * Fixed-level interrupt entry points (inthandler1..inthandler7).
 * Each saves the interrupt frame and calls process_int() with a
 * hard-coded interrupt number and a pointer to the saved registers.
 * (The generic `inthandler` below extracts the number from the frame.)
 */
inthandler1:
	SAVE_ALL_INT
	/* NOTE: the vector word in the frame is not consulted here --
	 * this entry always reports interrupt number 65. */
	movel	%sp,%sp@-		/* arg 2: pointer to pt_regs */
	movel	#65,%sp@-		/* arg 1: fixed interrupt number */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop both arguments */
	bra	ret_from_exception
140
/*
 * inthandler2 -- fixed entry; reports interrupt number 66 to process_int().
 */
inthandler2:
	SAVE_ALL_INT
	movel	%sp,%sp@-		/* arg 2: pointer to pt_regs */
	movel	#66,%sp@-		/* arg 1: fixed interrupt number */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop both arguments */
	bra	ret_from_exception
151
/*
 * inthandler3 -- fixed entry; reports interrupt number 67 to process_int().
 */
inthandler3:
	SAVE_ALL_INT
	movel	%sp,%sp@-		/* arg 2: pointer to pt_regs */
	movel	#67,%sp@-		/* arg 1: fixed interrupt number */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop both arguments */
	bra	ret_from_exception
162
/*
 * inthandler4 -- fixed entry; reports interrupt number 68 to process_int().
 */
inthandler4:
	SAVE_ALL_INT
	movel	%sp,%sp@-		/* arg 2: pointer to pt_regs */
	movel	#68,%sp@-		/* arg 1: fixed interrupt number */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop both arguments */
	bra	ret_from_exception
173
/*
 * inthandler5 -- fixed entry; reports interrupt number 69 to process_int().
 */
inthandler5:
	SAVE_ALL_INT
	movel	%sp,%sp@-		/* arg 2: pointer to pt_regs */
	movel	#69,%sp@-		/* arg 1: fixed interrupt number */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop both arguments */
	bra	ret_from_exception
184
/*
 * inthandler6 -- fixed entry; reports interrupt number 70 to process_int().
 */
inthandler6:
	SAVE_ALL_INT
	movel	%sp,%sp@-		/* arg 2: pointer to pt_regs */
	movel	#70,%sp@-		/* arg 1: fixed interrupt number */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop both arguments */
	bra	ret_from_exception
195
/*
 * inthandler7 -- fixed entry; reports interrupt number 71 to process_int().
 */
inthandler7:
	SAVE_ALL_INT
	movel	%sp,%sp@-		/* arg 2: pointer to pt_regs */
	movel	#71,%sp@-		/* arg 1: fixed interrupt number */
	jbsr	process_int		/* process the IRQ */
3:	addql	#8,%sp			/* pop both arguments */
	bra	ret_from_exception
206
/*
 * inthandler -- generic interrupt entry.
 * Reads the format/vector word out of the exception frame and passes
 * the masked value to process_int() along with a pt_regs pointer.
 */
inthandler:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0		/* mask off frame-format bits, keep vector offset */

	movel	%sp,%sp@-		/* arg 2: pointer to pt_regs */
	movel	%d0,%sp@- 		/* arg 1: vector # from the frame */
	jbsr	process_int		/* process the IRQ */
3:     	addql	#8,%sp			/* pop parameters off stack */
	bra	ret_from_exception
217
/*
 * Handler for uninitialized and spurious interrupts.
 * Nothing useful can be done; count the event and return immediately.
 */
ENTRY(bad_interrupt)
	addql	#1,irq_err_count	/* bump the spurious-IRQ counter */
	rte				/* straight back to interrupted code */
224
/*
 * resume -- task context switch.
 *
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 *
 * Saves prev's SR, switch-stack registers, kernel SP and USP into its
 * thread struct, then loads the same state from next's thread struct;
 * the final rts executes on next's kernel stack.
 */
ENTRY(resume)
	movel	%a0,%d1				/* save prev thread in d1 */
						/* NOTE(review): d1 looks unused below -- confirm */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)	/* save sr */
	SAVE_SWITCH_STACK			/* save prev's switch-stack regs */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
	movel	%usp,%a3			/* save usp */
	movel	%a3,%a0@(TASK_THREAD+THREAD_USP)

	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
	movel	%a3,%usp
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK			/* restore next's switch-stack regs */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr	/* restore thread status reg */
	rts
244
245