/*
 * arch/sh/kernel/cpu/sh3/entry.S
 *
 *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 *  Copyright (C) 2003 - 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
! target is too far away, but doing so causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *    jmp	@k0	    ! control-transfer instruction
 *     ldc	k1, ssr     ! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 * 	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h
 *
 *	r0
 *      ...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
/* Offsets to the stack */
OFF_R0  =  0		/* Return value. New ABI also arg4 */
OFF_R1  =  4		/* New ABI: arg5 */
OFF_R2  =  8		/* New ABI: arg6 */
OFF_R3  =  12		/* New ABI: syscall_nr */
OFF_R4  =  16		/* New ABI: arg0 */
OFF_R5  =  20		/* New ABI: arg1 */
OFF_R6  =  24		/* New ABI: arg2 */
OFF_R7  =  28		/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC  =  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)
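!
! For reference, the arithmetic above follows directly from the stack
! layout listed at the top of this file: r0..r15 occupy offsets 0..60,
! so spc sits at 16*4 = 64, pr at 68, ssr at 16*4+8 = 72, gbr at 76,
! mach at 80, macl at 84, and the syscall #/TRA slot at 16*4+6*4 = 88.
!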

#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
!			_and_
! TLB hits where the access violates the protection.
! These can be valid accesses, such as stack growth and/or copy-on-write.
!
! Find the pmd/pte entry and load it into the TLB.
! If it's not found, raise an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends heavily on the C implementation.
!
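!
! Register setup for the C handlers called below (a rough sketch, not the
! authoritative prototypes -- see the fault handling code on the C side):
!
!	r4 = r15	-> struct pt_regs *regs
!	r5 = 0/1/2	-> error_code (read miss / write miss / initial write)
!	r6 = MMU_TEA	-> faulting address
!
! i.e. roughly  handle_tlbmiss(regs, error_code, address)  and
!               do_page_fault(regs, error_code, address),
! where a non-zero return from handle_tlbmiss falls back to do_page_fault.
!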

#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_handle_tlbmiss
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_handle_tlbmiss
	 mov	#1, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_handle_tlbmiss
	 mov	#2, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_do_page_fault
	 mov	#0, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_do_page_fault
	 mov	#1, r5

call_handle_tlbmiss:
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:
	mov	r8, r5
call_do_page_fault:
	mov.l	1f, r0
	mov.l	@r0, r6

	mov.l	3f, r0
	mov.l	4f, r1
	mov	r15, r4
	jmp	@r0
	 lds	r1, pr

	.align 2
1:	.long	MMU_TEA
2:	.long	handle_tlbmiss
3:	.long	do_page_fault
4:	.long	ret_from_exception

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align 2
1:	.long	MMU_TEA
2:	.long   do_address_error
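!
! As above, the C entry point is expected to look roughly like
! do_address_error(regs, writeaccess, address), matching the r4/r5/r6
! setup in call_dae (prototype assumed, not verified here).
!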
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
	mov.l	1f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr			! restore pr
	mov	k4, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k3, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

! restore_regs()
! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
! - switch bank
! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
! k2 returns original pr
! k3 returns original sr
! k4 returns original stack pointer
! r8 passes SR bitmask, overwritten with restored data on return
! r9 trashed
! BL=0 on entry, on exit BL=1 (depending on r8).
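!
! Typical usage (see sh_bios_handler above and restore_all below): load the
! SR bits to be set into r8 from a literal and bsr here; on return the
! caller restores pr from k2, derives the new SSR from k3 and switches back
! to the original stack via k4.
!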

ENTRY(restore_regs)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r9
	or	r8, r9
	ldc	r9, sr
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4		! original stack pointer
	ldc.l	@r15+, spc
	mov.l	@r15+, k2		! original PR
	mov.l	@r15+, k3		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	rts
	 add	#4, r15			! Skip syscall number

restore_all:
	mov.l	7f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr			! restore pr
	!
	! Calculate new SR value
	mov	k3, k2			! original SR value
	mov	#0xfffffff0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2			! Mask original SR value
	!
	mov	k3, k0			! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2			! Set the IMASK-bits
	ldc	k2, ssr
	!
	mov	k4, r15
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
7:	.long	0x30000000
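!
! Expressed in C for clarity, the SSR value computed by restore_all above
! is roughly (illustrative sketch only, names invented):
!
!	imask   = (saved_sr >> 4) & 0xf;
!	new_ssr = (saved_sr & ~0xf0) |
!	          (imask == 0xf ? (saved_sr & 0xf0) : g_imask);
!
! i.e. a fully-masked frame stays fully masked, otherwise the global
! interrupt mask (k6/g_imask, already in "value << 4" form) is restored.
!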

! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned on a page boundary.
!
	.balign 	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! 0x100: General exception vector
!
	.balign 	256,0,256
general_exception:
	bra	handle_exception
	 sts	pr, k3		! save original pr value in k3

! prepare_stack()
! - roll back gRB
! - switch to kernel stack
! k0 returns original sp (after roll back)
! k1 trashed
! k2 trashed
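!
! Illustrative sketch of the logic below (C-like pseudocode, assuming the
! usual gUSA rollback convention of r0 = end address of the atomic block,
! r1 = saved stack pointer and r15 = -(block size) while a block runs):
!
!	if ((unsigned long)r15 >= 0xc0000000) {		// gUSA block active
!		if (spc < saved_r0)
!			spc = saved_r0 + r15 - 2;	// restart the block
!		r15 = saved_r1;				// real stack pointer
!	}
!	if (exception came from user mode)
!		r15 = current_thread_info + THREAD_SIZE; // kernel stack top
!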

prepare_stack:
#ifdef CONFIG_GUSA
	! Check for roll back gRB (User and Kernel)
	mov	r15, k0
	shll	k0
	bf/s	1f
	 shll	k0
	bf/s	1f
	 stc	spc, k1
	stc	r0_bank, k0
	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
	bt/s	2f
	 stc	r1_bank, k1

	add	#-2, k0
	add	r15, k0
	ldc	k0, spc		! PC = saved r0 + r15 - 2
2:	mov	k1, r15		! SP = r1
1:
#endif
	! Switch to kernel stack if needed
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:
	rts
	 nop

!
! 0x400: Instruction and Data TLB miss exception vector
!
	.balign 	1024,0,1024
tlb_miss:
	sts	pr, k3		! save original pr value in k3

handle_exception:
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	5f, k2		! vector register address
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov.l	@k2, k2		! read out vector and keep in k2

handle_exception_special:
	setup_frame_reg

	! Setup return address and jump to exception handler
	mov.l	7f, r9		! fetch return address
	stc	r2_bank, r0	! k2 (vector)
	mov.l	6f, r10
	shlr2	r0
	shlr	r0
	mov.l	@(r0, r10), r10
	jmp	@r10
	 lds	r9, pr		! put return address in pr
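!
! Note on the shifts above: EXPEVT codes are spaced 0x20 apart, so
! (vector >> 5) is the table index and, with 4-byte entries in
! exception_handling_table, (vector >> 3) is the byte offset used here,
! e.g. vector 0x0e0 -> offset 0x1c -> entry 7.
!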

	.align	L1_CACHE_SHIFT

! save_regs()
! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
! - switch bank
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k0 contains original stack pointer*
! k1 trashed
! k3 passes original pr*
! k4 passes SR bitmask
! BL=1 on entry, on exit BL=0.
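!
! The push order below builds, from low to high address, exactly the frame
! described by the OFF_* offsets at the top of this file (the assembly-side
! image of struct pt_regs): r0..r15, spc, pr, ssr, gbr, mach, macl, tra.
!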

ENTRY(save_regs)
	mov	#-1, r1
	mov.l	k1, @-r15	! set TRA (default: -1)
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	mov.l	k3, @-r15	! original pr in k3
	stc.l	spc, @-r15

	mov.l	k0, @-r15	! original stack pointer in k0
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15

	mov.l	0f, k3		! SR bits to set in k3

	! fall-through

! save_low_regs()
! - modify SR for bank switch
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k3 passes bits to set in SR
! k4 passes bits to clear in SR

ENTRY(save_low_regs)
	stc	sr, r8
	or	k3, r8
	and	k4, r8
	ldc	r8, sr

	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	rts
	 mov.l	r0, @-r15

!
! 0x600: Interrupt / NMI vector
!
	.balign 	512,0,512
ENTRY(handle_interrupt)
	sts	pr, k3		! save original pr value in k3
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov	#-1, k2		! default vector kept in k2

	setup_frame_reg

	stc	sr, r0	! get status register
	shlr2	r0
	and	#0x3c, r0
	cmp/eq	#0x3c, r0
	bf	9f
	TRACE_IRQS_OFF
9:

	! Setup return address and jump to do_IRQ
	mov.l	4f, r9		! fetch return address
	lds	r9, pr		! put return address in pr
	mov.l	2f, r4
	mov.l	3f, r9
	mov.l	@r4, r4		! pass INTEVT vector as arg0

	shlr2	r4
	shlr	r4
	mov	r4, r0		! save vector->jmp table offset for later

	shlr2	r4		! vector to IRQ# conversion
	add	#-0x10, r4
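	! Net effect (illustrative): r0 = INTEVT >> 3 is the byte offset into
	! the 4-byte-per-entry dispatch table (INTEVT codes are spaced 0x20
	! apart), while r4 = (INTEVT >> 5) - 16 is the usual evt2irq()
	! mapping, e.g. INTEVT 0x200 -> IRQ 0.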

	cmp/pz	r4		! is it a valid IRQ?
	bt	10f

	/*
	 * We got here as a result of taking the INTEVT path for something
	 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
	 * path and special case the event dispatch instead.  This is the
	 * expected path for the NMI (and any other brilliantly implemented
	 * exception), which effectively wants regular exception dispatch
	 * but is unfortunately reported through INTEVT rather than
	 * EXPEVT.  Grr.
	 */
	mov.l	6f, r9
	mov.l	@(r0, r9), r9
	jmp	@r9
	 mov	r15, r8		! trap handlers take saved regs in r8

10:
	jmp	@r9		! Off to do_IRQ() we go.
	 mov	r15, r5		! pass saved registers as arg1

ENTRY(exception_none)
	rts
	 nop

	.align	L1_CACHE_SHIFT
exception_data:
0:	.long	0x000080f0	! FD=1, IMASK=15
1:	.long	0xcfffffff	! RB=0, BL=0
2:	.long	INTEVT
3:	.long	do_IRQ
4:	.long	ret_from_irq
5:	.long	EXPEVT
6:	.long	exception_handling_table
7:	.long	ret_from_exception
513