/* SPDX-License-Identifier: GPL-2.0-or-later
 *
 *  entry.S -- non-mmu 68000 interrupt and exception entry points
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Linux/m68k support by Hamish Macdonald
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl sys_call_table
.globl bad_interrupt
.globl inthandler1
.globl inthandler2
.globl inthandler3
.globl inthandler4
.globl inthandler5
.globl inthandler6
.globl inthandler7

badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_exception

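/*
 * Syscall tracing path: let the tracer see (and possibly modify) the
 * call on entry, dispatch it, then notify the tracer again on exit.
 */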
do_trace:
	movel	#-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
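	/* skip the syscall if syscall_trace_enter() returned -1 */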
	addql	#1,%d0
	jeq	ret_from_exception
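	/* reload the syscall number from the frame; the tracer may have changed it */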
	movel	%sp@(PT_OFF_ORIG_D0),%d1
	movel	#-ENOSYS,%d0
	cmpl	#NR_syscalls,%d1
	jcc	1f
	lsl	#2,%d1
	lea	sys_call_table, %a0
	jbsr	%a0@(%d1)

1:	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(system_call)
	SAVE_ALL_SYS

	/* save top of frame*/
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp

	movel	%sp@(PT_OFF_ORIG_D0),%d0

	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1
	movel	%d1,%a2
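	/* test TIF_SYSCALL_TRACE in thread_info->flags (byte-wise, big-endian) */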
	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	jne	do_trace
	cmpl	#NR_syscalls,%d0
	jcc	badsys
	lsl	#2,%d0
	lea	sys_call_table,%a0
	movel	%a0@(%d0), %a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value*/

ret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	/* returning to kernel mode? */
	jeq	Luser_return		/* no, check resched and signals */

Lkernel_return:
	RESTORE_ALL

Luser_return:
	/* only allow interrupts when we are really the last one on the*/
	/* kernel stack, otherwise stack overflow can occur during*/
	/* heavy interrupt load*/
	andw	#ALLOWINT,%sr

	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1
	movel	%d1,%a2
1:
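	/* any work pending? (the move sets the CC tested by jne) */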
	move	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
	jne	Lwork_to_do
	RESTORE_ALL

Lwork_to_do:
	movel	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

Lsignal_return:
	subql	#4,%sp			/* dummy return address*/
	SAVE_SWITCH_STACK
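	/* push a pointer to the saved pt_regs for do_notify_resume() */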
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrw	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	1b

/*
 * Interrupt handlers, responsible for calling process_int().
 * inthandler1..inthandler7 pass a fixed vector number; the generic
 * inthandler further below extracts it from the exception frame.
 */
inthandler1:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0
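	/* %d0 (vector from the frame) is unused here; a fixed vector number is pushed below */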

	movel	%sp,%sp@-
	movel	#65,%sp@-		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception

inthandler2:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#66,%sp@-		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception

inthandler3:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#67,%sp@-		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception

inthandler4:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#68,%sp@-		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception

inthandler5:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#69,%sp@-		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception

inthandler6:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#70,%sp@-		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception

inthandler7:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#71,%sp@-		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception

inthandler:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception

/*
 * Handler for uninitialized and spurious interrupts.
 */
ENTRY(bad_interrupt)
	addql	#1,irq_err_count
	rte

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
ENTRY(resume)
	movel	%a0,%d1				/* save prev thread in d1 */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)	/* save sr */
	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
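	/* %a3 is usable as scratch: SAVE_SWITCH_STACK saved it, RESTORE_SWITCH_STACK below reloads it */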
	movel	%usp,%a3			/* save usp */
	movel	%a3,%a0@(TASK_THREAD+THREAD_USP)

	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
	movel	%a3,%usp
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr	/* restore thread status reg */
	rts