xref: /linux/arch/powerpc/kernel/entry_32.S (revision 6cb7bfebb145af5ea1d052512a2ae7ff07a47202)
1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/config.h>
23#include <linux/errno.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x)	li r,(x)
45#endif
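/*
 * A note on LOAD_MSR_KERNEL: li can only materialize a 16-bit signed
 * immediate, so an MSR value with bits set at or above 0x10000 (as when
 * MSR_CE is included on 4xx/Book-E) needs the two-instruction form.
 * As a sketch, LOAD_MSR_KERNEL(r10, 0x00029000) would expand roughly to
 *	lis	r10,0x00029000@h	(high 16 bits)
 *	ori	r10,r10,0x00029000@l	(low 16 bits)
 * where the constant is purely illustrative, not a real MSR_KERNEL value.
 */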
46
47#ifdef CONFIG_BOOKE
48#include "head_booke.h"
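/*
 * The macro below patches up an exception frame for an exception-level
 * (critical/debug/machine-check) interrupt: r8 is parked in that level's
 * SPRG, BOOKE_LOAD_EXC_LEVEL_STACK (from head_booke.h) locates the
 * per-level exception stack, the r10/r11 values saved there are copied
 * into the GPR10/GPR11 slots of the frame at r11, and r8 is restored.
 * This is a reading of the code below; the exception-stack layout itself
 * is defined in head_booke.h.
 */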
49#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
50	mtspr	exc_level##_SPRG,r8;			\
51	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
52	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
53	stw	r0,GPR10(r11);				\
54	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
55	stw	r0,GPR11(r11);				\
56	mfspr	r8,exc_level##_SPRG
57
58	.globl	mcheck_transfer_to_handler
59mcheck_transfer_to_handler:
60	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
61	b	transfer_to_handler_full
62
63	.globl	debug_transfer_to_handler
64debug_transfer_to_handler:
65	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
66	b	transfer_to_handler_full
67
68	.globl	crit_transfer_to_handler
69crit_transfer_to_handler:
70	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
71	/* fall through */
72#endif
73
74#ifdef CONFIG_40x
75	.globl	crit_transfer_to_handler
76crit_transfer_to_handler:
77	lwz	r0,crit_r10@l(0)
78	stw	r0,GPR10(r11)
79	lwz	r0,crit_r11@l(0)
80	stw	r0,GPR11(r11)
81	/* fall through */
82#endif
83
84/*
85 * This code finishes saving the registers to the exception frame
86 * and jumps to the appropriate handler for the exception, turning
87 * on address translation.
88 * Note that we rely on the caller having set cr0.eq iff the exception
89 * occurred in kernel mode (i.e. MSR:PR = 0).
90 */
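/*
 * Register conventions relied on below: r9 holds the interrupted MSR,
 * r11 points at the exception frame being filled in, r12 holds the
 * interrupted NIP, r10 holds the MSR value the handler should run with,
 * and LR points at a two-word table giving the handler address and the
 * address to return to when the handler is done (see the dispatch at
 * label 3 below).
 */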
91	.globl	transfer_to_handler_full
92transfer_to_handler_full:
93	SAVE_NVGPRS(r11)
94	/* fall through */
95
96	.globl	transfer_to_handler
97transfer_to_handler:
98	stw	r2,GPR2(r11)
99	stw	r12,_NIP(r11)
100	stw	r9,_MSR(r11)
101	andi.	r2,r9,MSR_PR
102	mfctr	r12
103	mfspr	r2,SPRN_XER
104	stw	r12,_CTR(r11)
105	stw	r2,_XER(r11)
106	mfspr	r12,SPRN_SPRG3
107	addi	r2,r12,-THREAD
108	tovirt(r2,r2)			/* set r2 to current */
109	beq	2f			/* if from user, fix up THREAD.regs */
110	addi	r11,r1,STACK_FRAME_OVERHEAD
111	stw	r11,PT_REGS(r12)
112#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
113	/* Check to see if the dbcr0 register is set up to debug.  Use the
114	   single-step bit to do this. */
115	lwz	r12,THREAD_DBCR0(r12)
116	andis.	r12,r12,DBCR0_IC@h
117	beq+	3f
118	/* From user and task is ptraced - load up global dbcr0 */
119	li	r12,-1			/* clear all pending debug events */
120	mtspr	SPRN_DBSR,r12
121	lis	r11,global_dbcr0@ha
122	tophys(r11,r11)
123	addi	r11,r11,global_dbcr0@l
124	lwz	r12,0(r11)
125	mtspr	SPRN_DBCR0,r12
126	lwz	r12,4(r11)
127	addi	r12,r12,-1
128	stw	r12,4(r11)
129#endif
130	b	3f
1312:	/* if from kernel, check interrupted DOZE/NAP mode and
132         * check for stack overflow
133         */
134#ifdef CONFIG_6xx
135	mfspr	r11,SPRN_HID0
136	mtcr	r11
137BEGIN_FTR_SECTION
138	bt-	8,power_save_6xx_restore	/* Check DOZE */
139END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140BEGIN_FTR_SECTION
141	bt-	9,power_save_6xx_restore	/* Check NAP */
142END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143#endif /* CONFIG_6xx */
144	.globl transfer_to_handler_cont
145transfer_to_handler_cont:
146	lwz	r11,THREAD_INFO-THREAD(r12)
147	cmplw	r1,r11			/* if r1 <= current->thread_info */
148	ble-	stack_ovf		/* then the kernel stack overflowed */
1493:
150	mflr	r9
151	lwz	r11,0(r9)		/* virtual address of handler */
152	lwz	r9,4(r9)		/* where to go when done */
153	FIX_SRR1(r10,r12)
154	mtspr	SPRN_SRR0,r11
155	mtspr	SPRN_SRR1,r10
156	mtlr	r9
157	SYNC
158	RFI				/* jump to handler, enable MMU */
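/*
 * The two words read via LR at label 3 above are why callers reach this
 * code with a "bl" immediately followed by two .long values; one in-file
 * example is the nonrecoverable path further down:
 *
 *	bl	transfer_to_handler_full
 *	.long	nonrecoverable_exception
 *	.long	ret_from_except
 */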
159
160/*
161 * On kernel stack overflow, load up an initial stack pointer
162 * and call StackOverflow(regs), which should not return.
163 */
164stack_ovf:
165	/* sometimes we use a statically-allocated stack, which is OK. */
166	lis	r11,_end@h
167	ori	r11,r11,_end@l
168	cmplw	r1,r11
169	ble	3b			/* r1 <= &_end is OK */
170	SAVE_NVGPRS(r11)
171	addi	r3,r1,STACK_FRAME_OVERHEAD
172	lis	r1,init_thread_union@ha
173	addi	r1,r1,init_thread_union@l
174	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
175	lis	r9,StackOverflow@ha
176	addi	r9,r9,StackOverflow@l
177	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
178	FIX_SRR1(r10,r12)
179	mtspr	SPRN_SRR0,r9
180	mtspr	SPRN_SRR1,r10
181	SYNC
182	RFI
183
184/*
185 * Handle a system call.
186 */
187	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
188	.stabs	"entry_32.S",N_SO,0,0,0f
1890:
190
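/*
 * By the time we get here the exception frame has already been built;
 * r0 holds the system call number and r3-r8 hold its arguments, as the
 * sys_call_table index on r0 and the GPR3-GPR8 reloads in the trace
 * path below show.
 */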
191_GLOBAL(DoSyscall)
192	stw	r0,THREAD+LAST_SYSCALL(r2)
193	stw	r3,ORIG_GPR3(r1)
194	li	r12,0
195	stw	r12,RESULT(r1)
196	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
197	rlwinm	r11,r11,0,4,2
198	stw	r11,_CCR(r1)
199#ifdef SHOW_SYSCALLS
200	bl	do_show_syscall
201#endif /* SHOW_SYSCALLS */
202	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
203	li	r11,0
204	stb	r11,TI_SC_NOERR(r10)
205	lwz	r11,TI_FLAGS(r10)
206	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
207	bne-	syscall_dotrace
208syscall_dotrace_cont:
209	cmplwi	0,r0,NR_syscalls
210	lis	r10,sys_call_table@h
211	ori	r10,r10,sys_call_table@l
212	slwi	r0,r0,2
213	bge-	66f
214	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
215	mtlr	r10
216	addi	r9,r1,STACK_FRAME_OVERHEAD
217	PPC440EP_ERR42
218	blrl			/* Call handler */
219	.globl	ret_from_syscall
220ret_from_syscall:
221#ifdef SHOW_SYSCALLS
222	bl	do_show_syscall_exit
223#endif
224	mr	r6,r3
225	li	r11,-_LAST_ERRNO
226	cmplw	0,r3,r11
227	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
228	blt+	30f
229	lbz	r11,TI_SC_NOERR(r12)
230	cmpwi	r11,0
231	bne	30f
232	neg	r3,r3
233	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
234	oris	r10,r10,0x1000
235	stw	r10,_CCR(r1)
236
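	/*
	 * The block above implements the user-visible 32-bit PowerPC
	 * syscall convention: on failure r3 is made positive (the errno
	 * value) and CR0.SO is set, unless the syscall was flagged via
	 * TI_SC_NOERR; on success CR0.SO stays clear.  A caller-side
	 * check would look roughly like this sketch:
	 *
	 *	bns+	1f		(SO clear: r3 is the result)
	 *	neg	r3,r3		(SO set: turn r3 back into -errno)
	 * 1:
	 */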
237	/* disable interrupts so current_thread_info()->flags can't change */
23830:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
239	SYNC
240	MTMSRD(r10)
241	lwz	r9,TI_FLAGS(r12)
242	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
243	bne-	syscall_exit_work
244syscall_exit_cont:
245#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
246	/* If the process has its own DBCR0 value, load it up.  The
247	   single-step bit tells us that dbcr0 should be loaded. */
248	lwz	r0,THREAD+THREAD_DBCR0(r2)
249	andis.	r10,r0,DBCR0_IC@h
250	bnel-	load_dbcr0
251#endif
252	stwcx.	r0,0,r1			/* to clear the reservation */
253	lwz	r4,_LINK(r1)
254	lwz	r5,_CCR(r1)
255	mtlr	r4
256	mtcr	r5
257	lwz	r7,_NIP(r1)
258	lwz	r8,_MSR(r1)
259	FIX_SRR1(r8, r0)
260	lwz	r2,GPR2(r1)
261	lwz	r1,GPR1(r1)
262	mtspr	SPRN_SRR0,r7
263	mtspr	SPRN_SRR1,r8
264	SYNC
265	RFI
266
26766:	li	r3,-ENOSYS
268	b	ret_from_syscall
269
270	.globl	ret_from_fork
271ret_from_fork:
272	REST_NVGPRS(r1)
273	bl	schedule_tail
274	li	r3,0
275	b	ret_from_syscall
276
277/* Traced system call support */
278syscall_dotrace:
279	SAVE_NVGPRS(r1)
280	li	r0,0xc00
281	stw	r0,TRAP(r1)
282	addi	r3,r1,STACK_FRAME_OVERHEAD
283	bl	do_syscall_trace_enter
284	lwz	r0,GPR0(r1)	/* Restore original registers */
285	lwz	r3,GPR3(r1)
286	lwz	r4,GPR4(r1)
287	lwz	r5,GPR5(r1)
288	lwz	r6,GPR6(r1)
289	lwz	r7,GPR7(r1)
290	lwz	r8,GPR8(r1)
291	REST_NVGPRS(r1)
292	b	syscall_dotrace_cont
293
294syscall_exit_work:
295	stw	r6,RESULT(r1)	/* Save result */
296	stw	r3,GPR3(r1)	/* Update return value */
297	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
298	beq	5f
299	ori	r10,r10,MSR_EE
300	SYNC
301	MTMSRD(r10)		/* re-enable interrupts */
302	lwz	r4,TRAP(r1)
303	andi.	r4,r4,1
304	beq	4f
305	SAVE_NVGPRS(r1)
306	li	r4,0xc00
307	stw	r4,TRAP(r1)
3084:
309	addi	r3,r1,STACK_FRAME_OVERHEAD
310	bl	do_syscall_trace_leave
311	REST_NVGPRS(r1)
3122:
313	lwz	r3,GPR3(r1)
314	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
315	SYNC
316	MTMSRD(r10)		/* disable interrupts again */
317	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
318	lwz	r9,TI_FLAGS(r12)
3195:
320	andi.	r0,r9,_TIF_NEED_RESCHED
321	bne	1f
322	lwz	r5,_MSR(r1)
323	andi.	r5,r5,MSR_PR
324	beq	syscall_exit_cont
325	andi.	r0,r9,_TIF_SIGPENDING
326	beq	syscall_exit_cont
327	b	do_user_signal
3281:
329	ori	r10,r10,MSR_EE
330	SYNC
331	MTMSRD(r10)		/* re-enable interrupts */
332	bl	schedule
333	b	2b
334
335#ifdef SHOW_SYSCALLS
336do_show_syscall:
337#ifdef SHOW_SYSCALLS_TASK
338	lis	r11,show_syscalls_task@ha
339	lwz	r11,show_syscalls_task@l(r11)
340	cmp	0,r2,r11
341	bnelr
342#endif
343	stw	r31,GPR31(r1)
344	mflr	r31
345	lis	r3,7f@ha
346	addi	r3,r3,7f@l
347	lwz	r4,GPR0(r1)
348	lwz	r5,GPR3(r1)
349	lwz	r6,GPR4(r1)
350	lwz	r7,GPR5(r1)
351	lwz	r8,GPR6(r1)
352	lwz	r9,GPR7(r1)
353	bl	printk
354	lis	r3,77f@ha
355	addi	r3,r3,77f@l
356	lwz	r4,GPR8(r1)
357	mr	r5,r2
358	bl	printk
359	lwz	r0,GPR0(r1)
360	lwz	r3,GPR3(r1)
361	lwz	r4,GPR4(r1)
362	lwz	r5,GPR5(r1)
363	lwz	r6,GPR6(r1)
364	lwz	r7,GPR7(r1)
365	lwz	r8,GPR8(r1)
366	mtlr	r31
367	lwz	r31,GPR31(r1)
368	blr
369
370do_show_syscall_exit:
371#ifdef SHOW_SYSCALLS_TASK
372	lis	r11,show_syscalls_task@ha
373	lwz	r11,show_syscalls_task@l(r11)
374	cmp	0,r2,r11
375	bnelr
376#endif
377	stw	r31,GPR31(r1)
378	mflr	r31
379	stw	r3,RESULT(r1)	/* Save result */
380	mr	r4,r3
381	lis	r3,79f@ha
382	addi	r3,r3,79f@l
383	bl	printk
384	lwz	r3,RESULT(r1)
385	mtlr	r31
386	lwz	r31,GPR31(r1)
387	blr
388
3897:	.string	"syscall %d(%x, %x, %x, %x, %x, "
39077:	.string	"%x), current=%p\n"
39179:	.string	" -> %x\n"
392	.align	2,0
393
394#ifdef SHOW_SYSCALLS_TASK
395	.data
396	.globl	show_syscalls_task
397show_syscalls_task:
398	.long	-1
399	.text
400#endif
401#endif /* SHOW_SYSCALLS */
402
403/*
404 * The sigsuspend and rt_sigsuspend system calls can call do_signal
405 * and thus put the process into the stopped state where we might
406 * want to examine its user state with ptrace.  Therefore we need
407 * to save all the nonvolatile registers (r13 - r31) before calling
408 * the C code.
409 */
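/*
 * These stubs rely on the convention used throughout this file: bit 0 of
 * the TRAP word in the frame is set while only the volatile registers
 * have been saved, and is cleared once SAVE_NVGPRS has stored r13-r31,
 * so later code (do_user_signal, for instance) can tell whether a full
 * register set is available.
 */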
410	.globl	ppc_sigsuspend
411ppc_sigsuspend:
412	SAVE_NVGPRS(r1)
413	lwz	r0,TRAP(r1)
414	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
415	stw	r0,TRAP(r1)		/* register set saved */
416	b	sys_sigsuspend
417
418	.globl	ppc_rt_sigsuspend
419ppc_rt_sigsuspend:
420	SAVE_NVGPRS(r1)
421	lwz	r0,TRAP(r1)
422	rlwinm	r0,r0,0,0,30
423	stw	r0,TRAP(r1)
424	b	sys_rt_sigsuspend
425
426	.globl	ppc_fork
427ppc_fork:
428	SAVE_NVGPRS(r1)
429	lwz	r0,TRAP(r1)
430	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
431	stw	r0,TRAP(r1)		/* register set saved */
432	b	sys_fork
433
434	.globl	ppc_vfork
435ppc_vfork:
436	SAVE_NVGPRS(r1)
437	lwz	r0,TRAP(r1)
438	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
439	stw	r0,TRAP(r1)		/* register set saved */
440	b	sys_vfork
441
442	.globl	ppc_clone
443ppc_clone:
444	SAVE_NVGPRS(r1)
445	lwz	r0,TRAP(r1)
446	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
447	stw	r0,TRAP(r1)		/* register set saved */
448	b	sys_clone
449
450	.globl	ppc_swapcontext
451ppc_swapcontext:
452	SAVE_NVGPRS(r1)
453	lwz	r0,TRAP(r1)
454	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
455	stw	r0,TRAP(r1)		/* register set saved */
456	b	sys_swapcontext
457
458/*
459 * Top-level page fault handling.
460 * This is in assembler because if do_page_fault tells us that
461 * it is a bad kernel page fault, we want to save the non-volatile
462 * registers before calling bad_page_fault.
463 */
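/*
 * On entry r4 holds the faulting address (it is stored into the frame's
 * _DAR slot below); r5 is presumed to carry the error code set up by the
 * exception prologue.  A nonzero return from do_page_fault is treated as
 * a signal number and handed to bad_page_fault together with the address.
 */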
464	.globl	handle_page_fault
465handle_page_fault:
466	stw	r4,_DAR(r1)
467	addi	r3,r1,STACK_FRAME_OVERHEAD
468	bl	do_page_fault
469	cmpwi	r3,0
470	beq+	ret_from_except
471	SAVE_NVGPRS(r1)
472	lwz	r0,TRAP(r1)
473	clrrwi	r0,r0,1
474	stw	r0,TRAP(r1)
475	mr	r5,r3
476	addi	r3,r1,STACK_FRAME_OVERHEAD
477	lwz	r4,_DAR(r1)
478	bl	bad_page_fault
479	b	ret_from_except_full
480
481/*
482 * This routine switches between two different tasks.  The process
483 * state of one is saved on its kernel stack.  Then the state
484 * of the other is restored from its kernel stack.  The memory
485 * management hardware is updated to the second process's state.
486 * Finally, we can return to the second process.
487 * On entry, r3 points to the THREAD for the current task, r4
488 * points to the THREAD for the new task.
489 *
490 * This routine is always called with interrupts disabled.
491 *
492 * Note: there are two ways to get to the "going out" portion
493 * of this code; either by coming in via the entry (_switch)
494 * or via "fork" which must set up an environment equivalent
495 * to the "_switch" path.  If you change this, you'll have to
496 * change the fork code also.
497 *
498 * The code which creates the new task context is in 'copy_thread'
499 * in arch/ppc/kernel/process.c
500 */
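/*
 * On return r3 carries the previous task's 'current' pointer (saved
 * below as the "last" value), so a C-level view of this routine is
 * roughly
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 * (a sketch of the declaration only; see where _switch is declared in
 * the C code for the authoritative prototype).
 */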
501_GLOBAL(_switch)
502	stwu	r1,-INT_FRAME_SIZE(r1)
503	mflr	r0
504	stw	r0,INT_FRAME_SIZE+4(r1)
505	/* r3-r12 are caller saved -- Cort */
506	SAVE_NVGPRS(r1)
507	stw	r0,_NIP(r1)	/* Return to switch caller */
508	mfmsr	r11
509	li	r0,MSR_FP	/* Disable floating-point */
510#ifdef CONFIG_ALTIVEC
511BEGIN_FTR_SECTION
512	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
513	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
514	stw	r12,THREAD+THREAD_VRSAVE(r2)
515END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
516#endif /* CONFIG_ALTIVEC */
517#ifdef CONFIG_SPE
518	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
519	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
520	stw	r12,THREAD+THREAD_SPEFSCR(r2)
521#endif /* CONFIG_SPE */
522	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
523	beq+	1f
524	andc	r11,r11,r0
525	MTMSRD(r11)
526	isync
5271:	stw	r11,_MSR(r1)
528	mfcr	r10
529	stw	r10,_CCR(r1)
530	stw	r1,KSP(r3)	/* Set old stack pointer */
531
532#ifdef CONFIG_SMP
533	/* We need a sync somewhere here to make sure that if the
534	 * previous task gets rescheduled on another CPU, it sees all
535	 * stores it has performed on this one.
536	 */
537	sync
538#endif /* CONFIG_SMP */
539
540	tophys(r0,r4)
541	CLR_TOP32(r0)
542	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
543	lwz	r1,KSP(r4)	/* Load new stack pointer */
544
545	/* save the old current 'last' for return value */
546	mr	r3,r2
547	addi	r2,r4,-THREAD	/* Update current */
548
549#ifdef CONFIG_ALTIVEC
550BEGIN_FTR_SECTION
551	lwz	r0,THREAD+THREAD_VRSAVE(r2)
552	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
553END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
554#endif /* CONFIG_ALTIVEC */
555#ifdef CONFIG_SPE
556	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
557	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
558#endif /* CONFIG_SPE */
559
560	lwz	r0,_CCR(r1)
561	mtcrf	0xFF,r0
562	/* r3-r12 are destroyed -- Cort */
563	REST_NVGPRS(r1)
564
565	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
566	mtlr	r4
567	addi	r1,r1,INT_FRAME_SIZE
568	blr
569
570	.globl	fast_exception_return
571fast_exception_return:
572#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
573	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
574	beq	1f			/* if not, we've got problems */
575#endif
576
5772:	REST_4GPRS(3, r11)
578	lwz	r10,_CCR(r11)
579	REST_GPR(1, r11)
580	mtcr	r10
581	lwz	r10,_LINK(r11)
582	mtlr	r10
583	REST_GPR(10, r11)
584	mtspr	SPRN_SRR1,r9
585	mtspr	SPRN_SRR0,r12
586	REST_GPR(9, r11)
587	REST_GPR(12, r11)
588	lwz	r11,GPR11(r11)
589	SYNC
590	RFI
591
592#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
593/* check if the exception happened in a restartable section */
5941:	lis	r3,exc_exit_restart_end@ha
595	addi	r3,r3,exc_exit_restart_end@l
596	cmplw	r12,r3
597	bge	3f
598	lis	r4,exc_exit_restart@ha
599	addi	r4,r4,exc_exit_restart@l
600	cmplw	r12,r4
601	blt	3f
602	lis	r3,fee_restarts@ha
603	tophys(r3,r3)
604	lwz	r5,fee_restarts@l(r3)
605	addi	r5,r5,1
606	stw	r5,fee_restarts@l(r3)
607	mr	r12,r4		/* restart at exc_exit_restart */
608	b	2b
609
610	.comm	fee_restarts,4
611
612/* aargh, a nonrecoverable interrupt, panic */
613/* aargh, we don't know which trap this is */
614/* but the 601 doesn't implement the RI bit, so assume it's OK */
6153:
616BEGIN_FTR_SECTION
617	b	2b
618END_FTR_SECTION_IFSET(CPU_FTR_601)
619	li	r10,-1
620	stw	r10,TRAP(r11)
621	addi	r3,r1,STACK_FRAME_OVERHEAD
622	lis	r10,MSR_KERNEL@h
623	ori	r10,r10,MSR_KERNEL@l
624	bl	transfer_to_handler_full
625	.long	nonrecoverable_exception
626	.long	ret_from_except
627#endif
628
629	.globl	sigreturn_exit
630sigreturn_exit:
631	subi	r1,r3,STACK_FRAME_OVERHEAD
632	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
633	lwz	r9,TI_FLAGS(r12)
634	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
635	beq+	ret_from_except_full
636	bl	do_syscall_trace_leave
637	/* fall through */
638
639	.globl	ret_from_except_full
640ret_from_except_full:
641	REST_NVGPRS(r1)
642	/* fall through */
643
644	.globl	ret_from_except
645ret_from_except:
646	/* Hard-disable interrupts so that current_thread_info()->flags
647	 * can't change between when we test it and when we return
648	 * from the interrupt. */
649	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
650	SYNC			/* Some chip revs have problems here... */
651	MTMSRD(r10)		/* disable interrupts */
652
653	lwz	r3,_MSR(r1)	/* Returning to user mode? */
654	andi.	r0,r3,MSR_PR
655	beq	resume_kernel
656
657user_exc_return:		/* r10 contains MSR_KERNEL here */
658	/* Check current_thread_info()->flags */
659	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
660	lwz	r9,TI_FLAGS(r9)
661	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
662	bne	do_work
663
664restore_user:
665#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
666	/* Check whether this process has its own DBCR0 value.  The
667	   single-step bit tells us that dbcr0 should be loaded. */
668	lwz	r0,THREAD+THREAD_DBCR0(r2)
669	andis.	r10,r0,DBCR0_IC@h
670	bnel-	load_dbcr0
671#endif
672
673#ifdef CONFIG_PREEMPT
674	b	restore
675
676/* N.B. the only way to get here is from the beq following ret_from_except. */
677resume_kernel:
678	/* check current_thread_info->preempt_count */
679	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
680	lwz	r0,TI_PREEMPT(r9)
681	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
682	bne	restore
683	lwz	r0,TI_FLAGS(r9)
684	andi.	r0,r0,_TIF_NEED_RESCHED
685	beq+	restore
686	andi.	r0,r3,MSR_EE	/* interrupts off? */
687	beq	restore		/* don't schedule if so */
6881:	bl	preempt_schedule_irq
689	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
690	lwz	r3,TI_FLAGS(r9)
691	andi.	r0,r3,_TIF_NEED_RESCHED
692	bne-	1b
693#else
694resume_kernel:
695#endif /* CONFIG_PREEMPT */
696
697	/* interrupts are hard-disabled at this point */
698restore:
699	lwz	r0,GPR0(r1)
700	lwz	r2,GPR2(r1)
701	REST_4GPRS(3, r1)
702	REST_2GPRS(7, r1)
703
704	lwz	r10,_XER(r1)
705	lwz	r11,_CTR(r1)
706	mtspr	SPRN_XER,r10
707	mtctr	r11
708
709	PPC405_ERR77(0,r1)
710	stwcx.	r0,0,r1			/* to clear the reservation */
711
712#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
713	lwz	r9,_MSR(r1)
714	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
715	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
716
717	lwz	r10,_CCR(r1)
718	lwz	r11,_LINK(r1)
719	mtcrf	0xFF,r10
720	mtlr	r11
721
722	/*
723	 * Once we put values in SRR0 and SRR1, we are in a state
724	 * where exceptions are not recoverable, since taking an
725	 * exception will trash SRR0 and SRR1.  Therefore we clear the
726	 * MSR:RI bit to indicate this.  If we do take an exception,
727	 * we can't return to the point of the exception but we
728	 * can restart the exception exit path at the label
729	 * exc_exit_restart below.  -- paulus
730	 */
731	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
732	SYNC
733	MTMSRD(r10)		/* clear the RI bit */
734	.globl exc_exit_restart
735exc_exit_restart:
736	lwz	r9,_MSR(r1)
737	lwz	r12,_NIP(r1)
738	FIX_SRR1(r9,r10)
739	mtspr	SPRN_SRR0,r12
740	mtspr	SPRN_SRR1,r9
741	REST_4GPRS(9, r1)
742	lwz	r1,GPR1(r1)
743	.globl exc_exit_restart_end
744exc_exit_restart_end:
745	SYNC
746	RFI
747
748#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
749	/*
750	 * This is a bit different on 4xx/Book-E because it doesn't have
751	 * the RI bit in the MSR.
752	 * The TLB miss handler checks if we have interrupted
753	 * the exception exit path and restarts it if so
754	 * (well maybe one day it will... :).
755	 */
756	lwz	r11,_LINK(r1)
757	mtlr	r11
758	lwz	r10,_CCR(r1)
759	mtcrf	0xff,r10
760	REST_2GPRS(9, r1)
761	.globl exc_exit_restart
762exc_exit_restart:
763	lwz	r11,_NIP(r1)
764	lwz	r12,_MSR(r1)
765exc_exit_start:
766	mtspr	SPRN_SRR0,r11
767	mtspr	SPRN_SRR1,r12
768	REST_2GPRS(11, r1)
769	lwz	r1,GPR1(r1)
770	.globl exc_exit_restart_end
771exc_exit_restart_end:
772	PPC405_ERR77_SYNC
773	rfi
774	b	.			/* prevent prefetch past rfi */
775
776/*
777 * Returning from a critical interrupt in user mode doesn't need
778 * to be any different from a normal exception.  For a critical
779 * interrupt in the kernel, we just return (without checking for
780 * preemption) since the interrupt may have happened at some crucial
781 * place (e.g. inside the TLB miss handler), and because we will be
782 * running with r1 pointing into critical_stack, not the current
783 * process's kernel stack (and therefore current_thread_info() will
784 * give the wrong answer).
785 * We have to restore various SPRs that may have been in use at the
786 * time of the critical interrupt.
787 *
788 */
789#ifdef CONFIG_40x
790#define PPC_40x_TURN_OFF_MSR_DR						    \
791	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
792	 * assume the instructions here are mapped by a pinned TLB entry */ \
793	li	r10,MSR_IR;						    \
794	mtmsr	r10;							    \
795	isync;								    \
796	tophys(r1, r1);
797#else
798#define PPC_40x_TURN_OFF_MSR_DR
799#endif
800
801#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
802	REST_NVGPRS(r1);						\
803	lwz	r3,_MSR(r1);						\
804	andi.	r3,r3,MSR_PR;						\
805	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
806	bne	user_exc_return;					\
807	lwz	r0,GPR0(r1);						\
808	lwz	r2,GPR2(r1);						\
809	REST_4GPRS(3, r1);						\
810	REST_2GPRS(7, r1);						\
811	lwz	r10,_XER(r1);						\
812	lwz	r11,_CTR(r1);						\
813	mtspr	SPRN_XER,r10;						\
814	mtctr	r11;							\
815	PPC405_ERR77(0,r1);						\
816	stwcx.	r0,0,r1;		/* to clear the reservation */	\
817	lwz	r11,_LINK(r1);						\
818	mtlr	r11;							\
819	lwz	r10,_CCR(r1);						\
820	mtcrf	0xff,r10;						\
821	PPC_40x_TURN_OFF_MSR_DR;					\
822	lwz	r9,_DEAR(r1);						\
823	lwz	r10,_ESR(r1);						\
824	mtspr	SPRN_DEAR,r9;						\
825	mtspr	SPRN_ESR,r10;						\
826	lwz	r11,_NIP(r1);						\
827	lwz	r12,_MSR(r1);						\
828	mtspr	exc_lvl_srr0,r11;					\
829	mtspr	exc_lvl_srr1,r12;					\
830	lwz	r9,GPR9(r1);						\
831	lwz	r12,GPR12(r1);						\
832	lwz	r10,GPR10(r1);						\
833	lwz	r11,GPR11(r1);						\
834	lwz	r1,GPR1(r1);						\
835	PPC405_ERR77_SYNC;						\
836	exc_lvl_rfi;							\
837	b	.;		/* prevent prefetch past exc_lvl_rfi */
838
839	.globl	ret_from_crit_exc
840ret_from_crit_exc:
841	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
842
843#ifdef CONFIG_BOOKE
844	.globl	ret_from_debug_exc
845ret_from_debug_exc:
846	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
847
848	.globl	ret_from_mcheck_exc
849ret_from_mcheck_exc:
850	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
851#endif /* CONFIG_BOOKE */
852
853/*
854 * Load the DBCR0 value for a task that is being ptraced,
855 * having first saved away the global DBCR0.  Note that r0
856 * has the DBCR0 value to set on entry to this routine.
857 */
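/*
 * global_dbcr0 (the .comm below) is two words: word 0 holds the saved
 * global DBCR0 value and word 1 is a use count, incremented here and
 * decremented again in transfer_to_handler, which restores the saved
 * value when a debugged task next enters the kernel.
 */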
858load_dbcr0:
859	mfmsr	r10		/* first disable debug exceptions */
860	rlwinm	r10,r10,0,~MSR_DE
861	mtmsr	r10
862	isync
863	mfspr	r10,SPRN_DBCR0
864	lis	r11,global_dbcr0@ha
865	addi	r11,r11,global_dbcr0@l
866	stw	r10,0(r11)
867	mtspr	SPRN_DBCR0,r0
868	lwz	r10,4(r11)
869	addi	r10,r10,1
870	stw	r10,4(r11)
871	li	r11,-1
872	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
873	blr
874
875	.comm	global_dbcr0,8
876#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
877
878do_work:			/* r10 contains MSR_KERNEL here */
879	andi.	r0,r9,_TIF_NEED_RESCHED
880	beq	do_user_signal
881
882do_resched:			/* r10 contains MSR_KERNEL here */
883	ori	r10,r10,MSR_EE
884	SYNC
885	MTMSRD(r10)		/* hard-enable interrupts */
886	bl	schedule
887recheck:
888	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
889	SYNC
890	MTMSRD(r10)		/* disable interrupts */
891	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
892	lwz	r9,TI_FLAGS(r9)
893	andi.	r0,r9,_TIF_NEED_RESCHED
894	bne-	do_resched
895	andi.	r0,r9,_TIF_SIGPENDING
896	beq	restore_user
897do_user_signal:			/* r10 contains MSR_KERNEL here */
898	ori	r10,r10,MSR_EE
899	SYNC
900	MTMSRD(r10)		/* hard-enable interrupts */
901	/* save r13-r31 in the exception frame, if not already done */
902	lwz	r3,TRAP(r1)
903	andi.	r0,r3,1
904	beq	2f
905	SAVE_NVGPRS(r1)
906	rlwinm	r3,r3,0,0,30
907	stw	r3,TRAP(r1)
9082:	li	r3,0
909	addi	r4,r1,STACK_FRAME_OVERHEAD
910	bl	do_signal
911	REST_NVGPRS(r1)
912	b	recheck
913
914/*
915 * We come here when we are at the end of handling an exception
916 * that occurred at a place where taking an exception will lose
917 * state information, such as the contents of SRR0 and SRR1.
918 */
919nonrecoverable:
920	lis	r10,exc_exit_restart_end@ha
921	addi	r10,r10,exc_exit_restart_end@l
922	cmplw	r12,r10
923	bge	3f
924	lis	r11,exc_exit_restart@ha
925	addi	r11,r11,exc_exit_restart@l
926	cmplw	r12,r11
927	blt	3f
928	lis	r10,ee_restarts@ha
929	lwz	r12,ee_restarts@l(r10)
930	addi	r12,r12,1
931	stw	r12,ee_restarts@l(r10)
932	mr	r12,r11		/* restart at exc_exit_restart */
933	blr
9343:	/* OK, we can't recover, kill this process */
935	/* but the 601 doesn't implement the RI bit, so assume it's OK */
936BEGIN_FTR_SECTION
937	blr
938END_FTR_SECTION_IFSET(CPU_FTR_601)
939	lwz	r3,TRAP(r1)
940	andi.	r0,r3,1
941	beq	4f
942	SAVE_NVGPRS(r1)
943	rlwinm	r3,r3,0,0,30
944	stw	r3,TRAP(r1)
9454:	addi	r3,r1,STACK_FRAME_OVERHEAD
946	bl	nonrecoverable_exception
947	/* shouldn't return */
948	b	4b
949
950	.comm	ee_restarts,4
951
952/*
953 * PROM code for specific machines follows.  Put it
954 * here so it's easy to add arch-specific sections later.
955 * -- Cort
956 */
957#ifdef CONFIG_PPC_OF
958/*
959 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
960 * called with the MMU off.
961 */
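/*
 * The sequence below sets SRR0 to rtas_entry and SRR1 to MSR_KERNEL with
 * IR/DR cleared, so the RFI enters RTAS with the MMU off; SPRG2 is given
 * the physical stack pointer so the return stub at 1: can find its way
 * back.  r3 is not touched here and is assumed to be the (physical
 * address of the) RTAS argument block passed in by the C caller.
 */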
962_GLOBAL(enter_rtas)
963	stwu	r1,-INT_FRAME_SIZE(r1)
964	mflr	r0
965	stw	r0,INT_FRAME_SIZE+4(r1)
966	lis	r4,rtas_data@ha
967	lwz	r4,rtas_data@l(r4)
968	lis	r6,1f@ha	/* physical return address for rtas */
969	addi	r6,r6,1f@l
970	tophys(r6,r6)
971	tophys(r7,r1)
972	lis	r8,rtas_entry@ha
973	lwz	r8,rtas_entry@l(r8)
974	mfmsr	r9
975	stw	r9,8(r1)
976	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
977	SYNC			/* disable interrupts so SRR0/1 */
978	MTMSRD(r0)		/* don't get trashed */
979	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
980	mtlr	r6
981	CLR_TOP32(r7)
982	mtspr	SPRN_SPRG2,r7
983	mtspr	SPRN_SRR0,r8
984	mtspr	SPRN_SRR1,r9
985	RFI
9861:	tophys(r9,r1)
987	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
988	lwz	r9,8(r9)	/* original msr value */
989	FIX_SRR1(r9,r0)
990	addi	r1,r1,INT_FRAME_SIZE
991	li	r0,0
992	mtspr	SPRN_SPRG2,r0
993	mtspr	SPRN_SRR0,r8
994	mtspr	SPRN_SRR1,r9
995	RFI			/* return to caller */
996
997	.globl	machine_check_in_rtas
998machine_check_in_rtas:
999	twi	31,0,0
1000	/* XXX load up BATs and panic */
1001
1002#endif /* CONFIG_PPC_OF */
1003