/*
 * arch/sh/kernel/entry.S
 *
 *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 *  Copyright (C) 2003 - 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpu/mmu_context.h>
#include <asm/unistd.h>

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
! target is too far away, but this causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal recognition, which happens after every
 * timer interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *    jmp	@k0	    ! control-transfer instruction
 *     ldc	k1, ssr     ! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 * 	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h
 *
 *	r0
 *      ...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
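!
! For reference, the frame described above can be read from C roughly as in
! the sketch below.  This is only an illustration, assuming the layout
! mirrors the SH struct pt_regs that ptrace.c expects; the struct name here
! is made up:
!
!	struct frame_sketch {
!		unsigned long regs[16];	/* r0 .. r15, at offsets 0 .. 60 */
!		unsigned long pc;	/* spc,  offset 64 */
!		unsigned long pr;	/*       offset 68 */
!		unsigned long sr;	/* ssr,  offset 72 */
!		unsigned long gbr;	/*       offset 76 */
!		unsigned long mach;	/*       offset 80 */
!		unsigned long macl;	/*       offset 84 */
!		long tra;		/* syscall #, offset 88 */
!	};
!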
#if defined(CONFIG_KGDB_NMI)
NMI_VEC = 0x1c0			! Must catch early for debounce
#endif

/* Offsets to the stack */
OFF_R0  =  0		/* Return value. New ABI also arg4 */
OFF_R1  =  4     	/* New ABI: arg5 */
OFF_R2  =  8     	/* New ABI: arg6 */
OFF_R3  =  12     	/* New ABI: syscall_nr */
OFF_R4  =  16     	/* New ABI: arg0 */
OFF_R5  =  20     	/* New ABI: arg1 */
OFF_R6  =  24     	/* New ABI: arg2 */
OFF_R7  =  28     	/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC  =  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)
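/*
 * These expressions evaluate to the byte offsets in the frame described
 * above: OFF_SP = 60, OFF_PC = 64, OFF_SR = 72 and OFF_TRA = 88, with pr
 * sitting at offset 68 between spc and ssr.
 */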


#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
!			_and_
! TLB hits, but the access violates the protection.
! It can be a valid access, such as stack growth and/or C-O-W.
!
!
! Find the pmd/pte entry and load the TLB.
! If it's not found, raise an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends heavily on the C implementation.
!
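!
! The C helpers referred to above are used by call_dpf below roughly as in
! this sketch (illustrative only -- the real prototypes live in the mm
! fault code):
!
!	/* fast path: walk the page tables and load the TLB, 0 on success */
!	asmlinkage int __do_page_fault(struct pt_regs *regs,
!				       unsigned long writeaccess,
!				       unsigned long address);
!
!	/* slow path: full fault handling (stack growth, COW, SIGSEGV) */
!	asmlinkage void do_page_fault(struct pt_regs *regs,
!				      unsigned long writeaccess,
!				      unsigned long address);
!
! call_dpf passes regs in r4, writeaccess in r5 and the faulting address
! (MMU_TEA) in r6, tries __do_page_fault first, and only re-enables
! interrupts and falls back to do_page_fault when the fast path fails.
!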

#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_dpf
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_dpf
	 mov	#1, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_dpf
	 mov	#1, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_dpf
	 mov	#0, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_dpf
	 mov	#1, r5

call_dpf:
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov	r6, r9
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:	sti
	mov.l	3f, r0
	mov	r9, r6
	mov	r8, r5
	jmp	@r0
	 mov	r15, r4

	.align 2
1:	.long	MMU_TEA
2:	.long	__do_page_fault
3:	.long	do_page_fault

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align 2
1:	.long	MMU_TEA
2:	.long   do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	stc	sr, r8
	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
	or	r9, r8
	ldc	r8, sr			! here, change the register bank
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k0
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k1
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	mov	k0, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k1, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

restore_all:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r8
	mov.l	7f, r9
	or	r9, r8			! BL =1, RB=1
	ldc	r8, sr			! here, change the register bank
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4		! original stack pointer
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k3		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	add	#4, r15			! Skip syscall number
	!
#ifdef CONFIG_SH_DSP
	mov.l	@r15+, k0		! DSP mode marker
	mov.l	5f, k1
	cmp/eq	k0, k1			! Do we have a DSP stack frame?
	bf	skip_restore

	stc	sr, k0			! Enable CPU DSP mode
	or	k1, k0			! (within kernel it may be disabled)
	ldc	k0, sr
	mov	r2, k0			! Backup r2

	! Restore DSP registers from stack
	mov	r15, r2
	movs.l	@r2+, a1
	movs.l	@r2+, a0g
	movs.l	@r2+, a1g
	movs.l	@r2+, m0
	movs.l	@r2+, m1
	mov	r2, r15

	lds.l	@r15+, a0
	lds.l	@r15+, x0
	lds.l	@r15+, x1
	lds.l	@r15+, y0
	lds.l	@r15+, y1
	lds.l	@r15+, dsr
	ldc.l	@r15+, rs
	ldc.l	@r15+, re
	ldc.l	@r15+, mod

	mov	k0, r2			! Restore r2
skip_restore:
#endif
	!
	! Calculate new SR value
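	! (The code below keeps everything from the saved SR except the IMASK
	!  field in bits 7:4: if the saved IMASK was 0xf, i.e. fully blocked,
	!  it is kept as-is, otherwise the global interrupt mask held in
	!  g_imask is spliced in instead.)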
	mov	k3, k2			! original SR value
	mov	#0xf0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2			! Mask original SR value
	!
	mov	k3, k0			! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2			! Set the IMASK-bits
	ldc	k2, ssr
	!
#if defined(CONFIG_KGDB_NMI)
	! Clear in_nmi
	mov.l	6f, k0
	mov	#0, k1
	mov.b	k1, @k0
#endif
	mov.l	@r15+, k2		! restore EXPEVT
	mov	k4, r15
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
7:	.long	0x30000000

! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned on a page boundary.
!
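!	(For reference: on SH-3 the CPU enters at fixed offsets from VBR --
!	VBR + 0x100 for general exceptions, VBR + 0x400 for TLB misses and
!	VBR + 0x600 for interrupts.  The .balign padding below lines each
!	handler up with its offset.)
!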
	.balign 	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
	.balign 	256,0,256
general_exception:
	mov.l	1f, k2
	mov.l	2f, k3
	bra	handle_exception
	 mov.l	@k2, k2
	.align	2
1:	.long	EXPEVT
2:	.long	ret_from_exception
!
!

	.balign 	1024,0,1024
tlb_miss:
	mov.l	1f, k2
	mov.l	4f, k3
	bra	handle_exception
	 mov.l	@k2, k2
!
	.balign 	512,0,512
interrupt:
	mov.l	2f, k2
	mov.l	3f, k3
#if defined(CONFIG_KGDB_NMI)
	! Debounce (filter nested NMI)
	mov.l	@k2, k0
	mov.l	5f, k1
	cmp/eq	k1, k0
	bf	0f
	mov.l	6f, k1
	tas.b	@k1
	bt	0f
	rte
	 nop
	.align	2
5:	.long	NMI_VEC
6:	.long	in_nmi
0:
#endif /* defined(CONFIG_KGDB_NMI) */
	bra	handle_exception
	 mov	#-1, k2		! interrupt exception marker

	.align	2
1:	.long	EXPEVT
2:	.long	INTEVT
3:	.long	ret_from_irq
4:	.long	ret_from_exception

!
!
	.align	2
ENTRY(handle_exception)
	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
	! save all registers onto the stack.
	!
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1
	mov	k1, r15		! change to kernel stack
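	! (shll8 + shll2 multiply the #(THREAD_SIZE >> 10) immediate by 1024;
	!  e.g. with an 8 KiB THREAD_SIZE the immediate is 8 and k1 becomes
	!  8192, so current + THREAD_SIZE points at the top of the current
	!  task's kernel stack.)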
	!
1:	mov.l	2f, k1
	!
#ifdef CONFIG_SH_DSP
	mov.l	r2, @-r15		! Save r2, we need another reg
	stc	sr, k4
	mov.l	1f, r2
	tst	r2, k4			! Check if in DSP mode
	mov.l	@r15+, r2		! Restore r2 now
	bt/s	skip_save
	 mov	#0, k4			! Set marker for no stack frame

	mov	r2, k4			! Backup r2 (in k4) for later

	! Save DSP registers on stack
	stc.l	mod, @-r15
	stc.l	re, @-r15
	stc.l	rs, @-r15
	sts.l	dsr, @-r15
	sts.l	y1, @-r15
	sts.l	y0, @-r15
	sts.l	x1, @-r15
	sts.l	x0, @-r15
	sts.l	a0, @-r15

	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.

	! FIXME: Make sure that this is still the case with newer toolchains,
	! as we're not at all interested in supporting ancient toolchains at
	! this point. -- PFM.

	mov	r15, r2
	.word	0xf653			! movs.l	a1, @-r2
	.word	0xf6f3			! movs.l	a0g, @-r2
	.word	0xf6d3			! movs.l	a1g, @-r2
	.word	0xf6c3			! movs.l	m0, @-r2
	.word	0xf6e3			! movs.l	m1, @-r2
	mov	r2, r15

	mov	k4, r2			! Restore r2
	mov.l	1f, k4			! Force DSP stack frame
skip_save:
	mov.l	k4, @-r15		! Push DSP mode marker onto stack
#endif
	! Save the user registers on the stack.
	mov.l	k2, @-r15	! EXPEVT

	mov	#-1, k4
	mov.l	k4, @-r15	! set TRA (default: -1)
	!
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	sts.l	pr, @-r15
	stc.l	spc, @-r15
	!
	lds	k3, pr		! Set the return address to pr
	!
	mov.l	k0, @-r15	! save original stack
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15
	!
	stc	sr, r8		! Back to normal register bank, and
	or	k1, r8		! Block all interrupts
	mov.l	3f, k1
	and	k1, r8		! ...
	ldc	r8, sr		! ...changed here.
	!
	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	mov.l	r0, @-r15

	/*
	 * This gets a bit tricky.. in the INTEVT case we don't want to use
	 * the VBR offset as a destination in the jump call table, since all
	 * of the destinations are the same. In this case, (interrupt) sets
	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
	 * to determine the exception type. For all other exceptions, we
	 * forcibly read EXPEVT from memory and fix up the jump address, in
	 * the interrupt exception case we jump to do_IRQ() and defer the
	 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
	 * checks that do_IRQ() was doing..
	 */
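	/*
	 * Worked example: EXPEVT codes are spaced 0x20 apart, and the table
	 * entries are 4 bytes each, so shifting the code right by three
	 * (shlr2 + shlr below) turns it straight into a byte offset into
	 * exception_handling_table, e.g. EXPEVT = 0x040 becomes offset 0x08,
	 * the third pointer in the table.
	 */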
	stc	r2_bank, r8
	cmp/pz	r8
	bf	interrupt_exception
	shlr2	r8
	shlr	r8
	mov.l	4f, r9
	add	r8, r9
	mov.l	@r9, r9
	jmp	@r9
	 nop
	rts
	 nop

	.align	2
1:	.long	0x00001000	! DSP=1
2:	.long	0x000080f0	! FD=1, IMASK=15
3:	.long	0xcfffffff	! RB=0, BL=0
4:	.long	exception_handling_table

interrupt_exception:
	mov.l	1f, r9
	jmp	@r9
	 nop
	rts
	 nop

	.align 2
1:	.long	do_IRQ

	.align	2
ENTRY(exception_none)
	rts
	 nop