xref: /linux/arch/powerpc/kernel/entry_32.S (revision eb2bce7f5e7ac1ca6da434461217fadf3c688d2c)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
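/*
 * LOAD_MSR_KERNEL(r, x) loads the constant x (normally MSR_KERNEL) into
 * register r, using a lis/ori pair when the value is too large for the
 * 16-bit immediate of li.
 */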
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif

#ifdef CONFIG_BOOKE
#include "head_booke.h"
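/*
 * TRANSFER_TO_HANDLER_EXC_LEVEL: the Book-E prologue for this exception
 * level saved r10/r11 on the exception-level stack; copy them into the
 * regular exception frame at r11, preserving r8 in the level's SPRG.
 */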
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
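/*
 * On 40x the critical exception prologue saved r10/r11 at the fixed
 * locations crit_r10/crit_r11; copy them into the exception frame.
 */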
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
	lwz	r9,THREAD_INFO-THREAD(r12)
	cmplw	r1,r9			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
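/*
 * The caller branched here with LR pointing at a pair of words: the
 * (virtual) address of the handler and the address to continue at when
 * the handler returns.  Load them and RFI into the handler with the
 * MMU enabled.
 */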
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

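/*
 * On entry r0 holds the syscall number (used to index sys_call_table)
 * and r3-r8 hold the syscall arguments; the exception prologue has
 * already saved the volatile registers into the frame at r1.
 */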
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
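	/* Return values in the range -_LAST_ERRNO..-1 indicate an error:
	   negate the value and set the SO bit so the user-space syscall
	   stub sees the failure. */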
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

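	/* The clearing is done with a lwarx/stwcx. loop so that it is
	   atomic with respect to any other updates of TI_FLAGS. */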
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

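/*
 * Fast exception return: restore CR, LR and the partially-saved GPRs
 * directly from the frame at r11 and return with RFI, without saving
 * the remaining registers or calling any C code.
 */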
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.comm	global_dbcr0,8
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

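/*
 * Handle pending work (reschedule and/or signal delivery) before
 * returning to user mode.  Interrupts are hard-disabled on entry and
 * are re-enabled around the calls to schedule() and do_signal().
 */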
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
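/*
 * If the interrupted PC lies within the restartable exit sequence we
 * restart it; otherwise the saved state is gone and we have to kill
 * the current process.
 */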
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
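/*
 * enter_rtas switches to real mode (MSR:IR/DR cleared), transfers to
 * the RTAS entry point with the physical return address in LR, and on
 * the way back restores the original MSR and returns to the caller.
 */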
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */
