1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33#include <asm/ftrace.h>
34#include <asm/ptrace.h>
35
36#undef SHOW_SYSCALLS
37#undef SHOW_SYSCALLS_TASK
38
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
42#if MSR_KERNEL >= 0x10000
43#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
44#else
45#define LOAD_MSR_KERNEL(r, x)	li r,(x)
46#endif
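/*
 * Typical use, as seen throughout this file:
 *	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 * The two-instruction lis/ori form is only needed when the value does
 * not fit in the signed 16-bit immediate of a single li.
 */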
47
48#ifdef CONFIG_BOOKE
49	.globl	mcheck_transfer_to_handler
50mcheck_transfer_to_handler:
51	mfspr	r0,SPRN_DSRR0
52	stw	r0,_DSRR0(r11)
53	mfspr	r0,SPRN_DSRR1
54	stw	r0,_DSRR1(r11)
55	/* fall through */
56
57	.globl	debug_transfer_to_handler
58debug_transfer_to_handler:
59	mfspr	r0,SPRN_CSRR0
60	stw	r0,_CSRR0(r11)
61	mfspr	r0,SPRN_CSRR1
62	stw	r0,_CSRR1(r11)
63	/* fall through */
64
65	.globl	crit_transfer_to_handler
66crit_transfer_to_handler:
67#ifdef CONFIG_PPC_BOOK3E_MMU
68	mfspr	r0,SPRN_MAS0
69	stw	r0,MAS0(r11)
70	mfspr	r0,SPRN_MAS1
71	stw	r0,MAS1(r11)
72	mfspr	r0,SPRN_MAS2
73	stw	r0,MAS2(r11)
74	mfspr	r0,SPRN_MAS3
75	stw	r0,MAS3(r11)
76	mfspr	r0,SPRN_MAS6
77	stw	r0,MAS6(r11)
78#ifdef CONFIG_PHYS_64BIT
79	mfspr	r0,SPRN_MAS7
80	stw	r0,MAS7(r11)
81#endif /* CONFIG_PHYS_64BIT */
82#endif /* CONFIG_PPC_BOOK3E_MMU */
83#ifdef CONFIG_44x
84	mfspr	r0,SPRN_MMUCR
85	stw	r0,MMUCR(r11)
86#endif
87	mfspr	r0,SPRN_SRR0
88	stw	r0,_SRR0(r11)
89	mfspr	r0,SPRN_SRR1
90	stw	r0,_SRR1(r11)
91
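	/*
	 * Save the thread's ksp_limit and temporarily point it into the
	 * stack we are actually running on: the rlwimi below keeps the
	 * low THREAD_SHIFT bits of the old limit but takes the upper
	 * bits from r1, so stack-overflow checks keep working while we
	 * are on the exception stack.  The ret_from_*_exc paths below
	 * restore the saved value.
	 */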
92	mfspr	r8,SPRN_SPRG_THREAD
93	lwz	r0,KSP_LIMIT(r8)
94	stw	r0,SAVED_KSP_LIMIT(r11)
95	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
96	stw	r0,KSP_LIMIT(r8)
97	/* fall through */
98#endif
99
100#ifdef CONFIG_40x
101	.globl	crit_transfer_to_handler
102crit_transfer_to_handler:
103	lwz	r0,crit_r10@l(0)
104	stw	r0,GPR10(r11)
105	lwz	r0,crit_r11@l(0)
106	stw	r0,GPR11(r11)
107	mfspr	r0,SPRN_SRR0
108	stw	r0,crit_srr0@l(0)
109	mfspr	r0,SPRN_SRR1
110	stw	r0,crit_srr1@l(0)
111
112	mfspr	r8,SPRN_SPRG_THREAD
113	lwz	r0,KSP_LIMIT(r8)
114	stw	r0,saved_ksp_limit@l(0)
115	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
116	stw	r0,KSP_LIMIT(r8)
117	/* fall through */
118#endif
119
120/*
121 * This code finishes saving the registers to the exception frame
122 * and jumps to the appropriate handler for the exception, turning
123 * on address translation.
124 * Note that we rely on the caller having set cr0.eq iff the exception
125 * occurred in kernel mode (i.e. MSR:PR = 0).
126 */
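/*
 * On entry (as set up by the exception prologs in head_*.S): r11
 * points to the exception frame, r9 and r12 hold the saved MSR and
 * NIP (SRR1/SRR0), r10 holds the MSR value the handler should run
 * with, and LR points to a two-word transfer table giving the
 * handler address and the address to return to when it is done.
 */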
127	.globl	transfer_to_handler_full
128transfer_to_handler_full:
129	SAVE_NVGPRS(r11)
130	/* fall through */
131
132	.globl	transfer_to_handler
133transfer_to_handler:
134	stw	r2,GPR2(r11)
135	stw	r12,_NIP(r11)
136	stw	r9,_MSR(r11)
137	andi.	r2,r9,MSR_PR
138	mfctr	r12
139	mfspr	r2,SPRN_XER
140	stw	r12,_CTR(r11)
141	stw	r2,_XER(r11)
142	mfspr	r12,SPRN_SPRG_THREAD
143	addi	r2,r12,-THREAD
144	tovirt(r2,r2)			/* set r2 to current */
145	beq	2f			/* if from user, fix up THREAD.regs */
146	addi	r11,r1,STACK_FRAME_OVERHEAD
147	stw	r11,PT_REGS(r12)
148#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
149	/* Check to see if the dbcr0 register is set up to debug.  Use the
150	   internal debug mode bit to do this. */
151	lwz	r12,THREAD_DBCR0(r12)
152	andis.	r12,r12,DBCR0_IDM@h
153	beq+	3f
154	/* From user and task is ptraced - load up global dbcr0 */
155	li	r12,-1			/* clear all pending debug events */
156	mtspr	SPRN_DBSR,r12
157	lis	r11,global_dbcr0@ha
158	tophys(r11,r11)
159	addi	r11,r11,global_dbcr0@l
160#ifdef CONFIG_SMP
161	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
162	lwz	r9,TI_CPU(r9)
163	slwi	r9,r9,3
164	add	r11,r11,r9
165#endif
166	lwz	r12,0(r11)
167	mtspr	SPRN_DBCR0,r12
168	lwz	r12,4(r11)
169	addi	r12,r12,-1
170	stw	r12,4(r11)
171#endif
172	b	3f
173
1742:	/* if from kernel, check interrupted DOZE/NAP mode and
175         * check for stack overflow
176         */
177	lwz	r9,KSP_LIMIT(r12)
178	cmplw	r1,r9			/* if r1 <= ksp_limit */
179	ble-	stack_ovf		/* then the kernel stack overflowed */
1805:
181#if defined(CONFIG_6xx) || defined(CONFIG_E500)
182	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
183	tophys(r9,r9)			/* check local flags */
184	lwz	r12,TI_LOCAL_FLAGS(r9)
185	mtcrf	0x01,r12
186	bt-	31-TLF_NAPPING,4f
187	bt-	31-TLF_SLEEPING,7f
188#endif /* CONFIG_6xx || CONFIG_E500 */
189	.globl transfer_to_handler_cont
190transfer_to_handler_cont:
1913:
192	mflr	r9
193	lwz	r11,0(r9)		/* virtual address of handler */
194	lwz	r9,4(r9)		/* where to go when done */
195#ifdef CONFIG_TRACE_IRQFLAGS
196	lis	r12,reenable_mmu@h
197	ori	r12,r12,reenable_mmu@l
198	mtspr	SPRN_SRR0,r12
199	mtspr	SPRN_SRR1,r10
200	SYNC
201	RFI
202reenable_mmu:				/* re-enable mmu so we can */
203	mfmsr	r10
204	lwz	r12,_MSR(r1)
205	xor	r10,r10,r12
206	andi.	r10,r10,MSR_EE		/* Did EE change? */
207	beq	1f
208
	/* Save the handler and return address into the 2 unused words
	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
	 * else can be recovered from the pt_regs except r3, which for
	 * normal interrupts has been set to point at pt_regs and for
	 * syscalls is an argument, so we temporarily save it in ORIG_GPR3.
	 */
215	stw	r9,8(r1)
216	stw	r11,12(r1)
217	stw	r3,ORIG_GPR3(r1)
218	bl	trace_hardirqs_off
219	lwz	r0,GPR0(r1)
220	lwz	r3,ORIG_GPR3(r1)
221	lwz	r4,GPR4(r1)
222	lwz	r5,GPR5(r1)
223	lwz	r6,GPR6(r1)
224	lwz	r7,GPR7(r1)
225	lwz	r8,GPR8(r1)
226	lwz	r9,8(r1)
227	lwz	r11,12(r1)
2281:	mtctr	r11
229	mtlr	r9
230	bctr				/* jump to handler */
231#else /* CONFIG_TRACE_IRQFLAGS */
232	mtspr	SPRN_SRR0,r11
233	mtspr	SPRN_SRR1,r10
234	mtlr	r9
235	SYNC
236	RFI				/* jump to handler, enable MMU */
237#endif /* CONFIG_TRACE_IRQFLAGS */
238
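/*
 * The exception interrupted the 6xx/E500 power-save code: clear the
 * NAPPING or SLEEPING flag in the thread_info local flags, then either
 * resume via the power-save restore path (nap case) or return to the
 * saved LR with MSR_EE cleared (sleep case).
 */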
239#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2404:	rlwinm	r12,r12,0,~_TLF_NAPPING
241	stw	r12,TI_LOCAL_FLAGS(r9)
242	b	power_save_ppc32_restore
243
2447:	rlwinm	r12,r12,0,~_TLF_SLEEPING
245	stw	r12,TI_LOCAL_FLAGS(r9)
246	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
247	rlwinm	r9,r9,0,~MSR_EE
248	lwz	r12,_LINK(r11)		/* and return to address in LR */
249	b	fast_exception_return
250#endif
251
252/*
253 * On kernel stack overflow, load up an initial stack pointer
254 * and call StackOverflow(regs), which should not return.
255 */
256stack_ovf:
257	/* sometimes we use a statically-allocated stack, which is OK. */
258	lis	r12,_end@h
259	ori	r12,r12,_end@l
260	cmplw	r1,r12
261	ble	5b			/* r1 <= &_end is OK */
262	SAVE_NVGPRS(r11)
263	addi	r3,r1,STACK_FRAME_OVERHEAD
264	lis	r1,init_thread_union@ha
265	addi	r1,r1,init_thread_union@l
266	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
267	lis	r9,StackOverflow@ha
268	addi	r9,r9,StackOverflow@l
269	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
270	FIX_SRR1(r10,r12)
271	mtspr	SPRN_SRR0,r9
272	mtspr	SPRN_SRR1,r10
273	SYNC
274	RFI
275
276/*
277 * Handle a system call.
278 */
279	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
280	.stabs	"entry_32.S",N_SO,0,0,0f
2810:
282
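/*
 * System call entry.  The exception frame has already been set up by
 * the system call prolog: r0 holds the syscall number, r3-r8 hold the
 * arguments and r1 points at the pt_regs frame on the kernel stack.
 */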
283_GLOBAL(DoSyscall)
284	stw	r3,ORIG_GPR3(r1)
285	li	r12,0
286	stw	r12,RESULT(r1)
287	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
288	rlwinm	r11,r11,0,4,2
289	stw	r11,_CCR(r1)
290#ifdef SHOW_SYSCALLS
291	bl	do_show_syscall
292#endif /* SHOW_SYSCALLS */
293#ifdef CONFIG_TRACE_IRQFLAGS
294	/* Return from syscalls can (and generally will) hard enable
295	 * interrupts. You aren't supposed to call a syscall with
296	 * interrupts disabled in the first place. However, to ensure
297	 * that we get it right vs. lockdep if it happens, we force
298	 * that hard enable here with appropriate tracing if we see
299	 * that we have been called with interrupts off
300	 */
301	mfmsr	r11
302	andi.	r12,r11,MSR_EE
303	bne+	1f
304	/* We came in with interrupts disabled, we enable them now */
305	bl	trace_hardirqs_on
306	mfmsr	r11
307	lwz	r0,GPR0(r1)
308	lwz	r3,GPR3(r1)
309	lwz	r4,GPR4(r1)
310	ori	r11,r11,MSR_EE
311	lwz	r5,GPR5(r1)
312	lwz	r6,GPR6(r1)
313	lwz	r7,GPR7(r1)
314	lwz	r8,GPR8(r1)
315	mtmsr	r11
3161:
317#endif /* CONFIG_TRACE_IRQFLAGS */
318	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
319	lwz	r11,TI_FLAGS(r10)
320	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
321	bne-	syscall_dotrace
322syscall_dotrace_cont:
323	cmplwi	0,r0,NR_syscalls
324	lis	r10,sys_call_table@h
325	ori	r10,r10,sys_call_table@l
326	slwi	r0,r0,2
327	bge-	66f
328	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
329	mtlr	r10
330	addi	r9,r1,STACK_FRAME_OVERHEAD
331	PPC440EP_ERR42
332	blrl			/* Call handler */
333	.globl	ret_from_syscall
334ret_from_syscall:
335#ifdef SHOW_SYSCALLS
336	bl	do_show_syscall_exit
337#endif
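	/*
	 * Syscall return convention: values in [-_LAST_ERRNO, -1] are
	 * errors; for those we set the summary-overflow (SO) bit in the
	 * saved CR and negate the value so userspace sees a positive
	 * errno.  Anything else is returned unchanged in r3.
	 */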
338	mr	r6,r3
339	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
340	/* disable interrupts so current_thread_info()->flags can't change */
341	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
342	/* Note: We don't bother telling lockdep about it */
343	SYNC
344	MTMSRD(r10)
345	lwz	r9,TI_FLAGS(r12)
346	li	r8,-_LAST_ERRNO
347	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
348	bne-	syscall_exit_work
349	cmplw	0,r3,r8
350	blt+	syscall_exit_cont
351	lwz	r11,_CCR(r1)			/* Load CR */
352	neg	r3,r3
353	oris	r11,r11,0x1000	/* Set SO bit in CR */
354	stw	r11,_CCR(r1)
355syscall_exit_cont:
356	lwz	r8,_MSR(r1)
357#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we want to
	 * catch the bugger if it does, right?
	 */
362	andi.	r10,r8,MSR_EE
363	bne+	1f
364	stw	r3,GPR3(r1)
365	bl      trace_hardirqs_off
366	lwz	r3,GPR3(r1)
3671:
368#endif /* CONFIG_TRACE_IRQFLAGS */
369#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
370	/* If the process has its own DBCR0 value, load it up.  The internal
371	   debug mode bit tells us that dbcr0 should be loaded. */
372	lwz	r0,THREAD+THREAD_DBCR0(r2)
373	andis.	r10,r0,DBCR0_IDM@h
374	bnel-	load_dbcr0
375#endif
376#ifdef CONFIG_44x
377BEGIN_MMU_FTR_SECTION
378	lis	r4,icache_44x_need_flush@ha
379	lwz	r5,icache_44x_need_flush@l(r4)
380	cmplwi	cr0,r5,0
381	bne-	2f
3821:
383END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
384#endif /* CONFIG_44x */
385BEGIN_FTR_SECTION
386	lwarx	r7,0,r1
387END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
388	stwcx.	r0,0,r1			/* to clear the reservation */
389	lwz	r4,_LINK(r1)
390	lwz	r5,_CCR(r1)
391	mtlr	r4
392	mtcr	r5
393	lwz	r7,_NIP(r1)
394	FIX_SRR1(r8, r0)
395	lwz	r2,GPR2(r1)
396	lwz	r1,GPR1(r1)
397	mtspr	SPRN_SRR0,r7
398	mtspr	SPRN_SRR1,r8
399	SYNC
400	RFI
401#ifdef CONFIG_44x
4022:	li	r7,0
403	iccci	r0,r0
404	stw	r7,icache_44x_need_flush@l(r4)
405	b	1b
406#endif  /* CONFIG_44x */
407
40866:	li	r3,-ENOSYS
409	b	ret_from_syscall
410
411	.globl	ret_from_fork
412ret_from_fork:
413	REST_NVGPRS(r1)
414	bl	schedule_tail
415	li	r3,0
416	b	ret_from_syscall
417
418/* Traced system call support */
419syscall_dotrace:
420	SAVE_NVGPRS(r1)
421	li	r0,0xc00
422	stw	r0,_TRAP(r1)
423	addi	r3,r1,STACK_FRAME_OVERHEAD
424	bl	do_syscall_trace_enter
425	/*
426	 * Restore argument registers possibly just changed.
427	 * We use the return value of do_syscall_trace_enter
428	 * for call number to look up in the table (r0).
429	 */
430	mr	r0,r3
431	lwz	r3,GPR3(r1)
432	lwz	r4,GPR4(r1)
433	lwz	r5,GPR5(r1)
434	lwz	r6,GPR6(r1)
435	lwz	r7,GPR7(r1)
436	lwz	r8,GPR8(r1)
437	REST_NVGPRS(r1)
438	b	syscall_dotrace_cont
439
440syscall_exit_work:
441	andi.	r0,r9,_TIF_RESTOREALL
442	beq+	0f
443	REST_NVGPRS(r1)
444	b	2f
4450:	cmplw	0,r3,r8
446	blt+	1f
447	andi.	r0,r9,_TIF_NOERROR
448	bne-	1f
449	lwz	r11,_CCR(r1)			/* Load CR */
450	neg	r3,r3
451	oris	r11,r11,0x1000	/* Set SO bit in CR */
452	stw	r11,_CCR(r1)
453
4541:	stw	r6,RESULT(r1)	/* Save result */
455	stw	r3,GPR3(r1)	/* Update return value */
4562:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
457	beq	4f
458
459	/* Clear per-syscall TIF flags if any are set.  */
460
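	/* Use a lwarx/stwcx. loop so the flag bits are cleared atomically
	 * with respect to concurrent updates of TI_FLAGS. */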
461	li	r11,_TIF_PERSYSCALL_MASK
462	addi	r12,r12,TI_FLAGS
4633:	lwarx	r8,0,r12
464	andc	r8,r8,r11
465#ifdef CONFIG_IBM405_ERR77
466	dcbt	0,r12
467#endif
468	stwcx.	r8,0,r12
469	bne-	3b
470	subi	r12,r12,TI_FLAGS
471
4724:	/* Anything which requires enabling interrupts? */
473	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
474	beq	ret_from_except
475
476	/* Re-enable interrupts. There is no need to trace that with
477	 * lockdep as we are supposed to have IRQs on at this point
478	 */
479	ori	r10,r10,MSR_EE
480	SYNC
481	MTMSRD(r10)
482
483	/* Save NVGPRS if they're not saved already */
484	lwz	r4,_TRAP(r1)
485	andi.	r4,r4,1
486	beq	5f
487	SAVE_NVGPRS(r1)
488	li	r4,0xc00
489	stw	r4,_TRAP(r1)
4905:
491	addi	r3,r1,STACK_FRAME_OVERHEAD
492	bl	do_syscall_trace_leave
493	b	ret_from_except_full
494
495#ifdef SHOW_SYSCALLS
496do_show_syscall:
497#ifdef SHOW_SYSCALLS_TASK
498	lis	r11,show_syscalls_task@ha
499	lwz	r11,show_syscalls_task@l(r11)
500	cmp	0,r2,r11
501	bnelr
502#endif
503	stw	r31,GPR31(r1)
504	mflr	r31
505	lis	r3,7f@ha
506	addi	r3,r3,7f@l
507	lwz	r4,GPR0(r1)
508	lwz	r5,GPR3(r1)
509	lwz	r6,GPR4(r1)
510	lwz	r7,GPR5(r1)
511	lwz	r8,GPR6(r1)
512	lwz	r9,GPR7(r1)
513	bl	printk
514	lis	r3,77f@ha
515	addi	r3,r3,77f@l
516	lwz	r4,GPR8(r1)
517	mr	r5,r2
518	bl	printk
519	lwz	r0,GPR0(r1)
520	lwz	r3,GPR3(r1)
521	lwz	r4,GPR4(r1)
522	lwz	r5,GPR5(r1)
523	lwz	r6,GPR6(r1)
524	lwz	r7,GPR7(r1)
525	lwz	r8,GPR8(r1)
526	mtlr	r31
527	lwz	r31,GPR31(r1)
528	blr
529
530do_show_syscall_exit:
531#ifdef SHOW_SYSCALLS_TASK
532	lis	r11,show_syscalls_task@ha
533	lwz	r11,show_syscalls_task@l(r11)
534	cmp	0,r2,r11
535	bnelr
536#endif
537	stw	r31,GPR31(r1)
538	mflr	r31
539	stw	r3,RESULT(r1)	/* Save result */
540	mr	r4,r3
541	lis	r3,79f@ha
542	addi	r3,r3,79f@l
543	bl	printk
544	lwz	r3,RESULT(r1)
545	mtlr	r31
546	lwz	r31,GPR31(r1)
547	blr
548
5497:	.string	"syscall %d(%x, %x, %x, %x, %x, "
55077:	.string	"%x), current=%p\n"
55179:	.string	" -> %x\n"
552	.align	2,0
553
554#ifdef SHOW_SYSCALLS_TASK
555	.data
556	.globl	show_syscalls_task
557show_syscalls_task:
558	.long	-1
559	.text
560#endif
561#endif /* SHOW_SYSCALLS */
562
563/*
564 * The fork/clone functions need to copy the full register set into
565 * the child process. Therefore we need to save all the nonvolatile
566 * registers (r13 - r31) before calling the C code.
567 */
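/*
 * The low bit of the _TRAP word in the frame records whether the
 * non-volatile registers have been saved: clearing it, as done below,
 * marks the frame as holding a full register set.
 */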
568	.globl	ppc_fork
569ppc_fork:
570	SAVE_NVGPRS(r1)
571	lwz	r0,_TRAP(r1)
572	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
573	stw	r0,_TRAP(r1)		/* register set saved */
574	b	sys_fork
575
576	.globl	ppc_vfork
577ppc_vfork:
578	SAVE_NVGPRS(r1)
579	lwz	r0,_TRAP(r1)
580	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
581	stw	r0,_TRAP(r1)		/* register set saved */
582	b	sys_vfork
583
584	.globl	ppc_clone
585ppc_clone:
586	SAVE_NVGPRS(r1)
587	lwz	r0,_TRAP(r1)
588	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
589	stw	r0,_TRAP(r1)		/* register set saved */
590	b	sys_clone
591
592	.globl	ppc_swapcontext
593ppc_swapcontext:
594	SAVE_NVGPRS(r1)
595	lwz	r0,_TRAP(r1)
596	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
597	stw	r0,_TRAP(r1)		/* register set saved */
598	b	sys_swapcontext
599
600/*
601 * Top-level page fault handling.
602 * This is in assembler because if do_page_fault tells us that
603 * it is a bad kernel page fault, we want to save the non-volatile
604 * registers before calling bad_page_fault.
605 */
606	.globl	handle_page_fault
607handle_page_fault:
608	stw	r4,_DAR(r1)
609	addi	r3,r1,STACK_FRAME_OVERHEAD
610	bl	do_page_fault
611	cmpwi	r3,0
612	beq+	ret_from_except
613	SAVE_NVGPRS(r1)
614	lwz	r0,_TRAP(r1)
615	clrrwi	r0,r0,1
616	stw	r0,_TRAP(r1)
617	mr	r5,r3
618	addi	r3,r1,STACK_FRAME_OVERHEAD
619	lwz	r4,_DAR(r1)
620	bl	bad_page_fault
621	b	ret_from_except_full
622
623/*
624 * This routine switches between two different tasks.  The process
625 * state of one is saved on its kernel stack.  Then the state
626 * of the other is restored from its kernel stack.  The memory
627 * management hardware is updated to the second process's state.
628 * Finally, we can return to the second process.
629 * On entry, r3 points to the THREAD for the current task, r4
630 * points to the THREAD for the new task.
631 *
632 * This routine is always called with interrupts disabled.
633 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork", which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c.
 */
643_GLOBAL(_switch)
644	stwu	r1,-INT_FRAME_SIZE(r1)
645	mflr	r0
646	stw	r0,INT_FRAME_SIZE+4(r1)
647	/* r3-r12 are caller saved -- Cort */
648	SAVE_NVGPRS(r1)
649	stw	r0,_NIP(r1)	/* Return to switch caller */
650	mfmsr	r11
651	li	r0,MSR_FP	/* Disable floating-point */
652#ifdef CONFIG_ALTIVEC
653BEGIN_FTR_SECTION
654	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
655	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
656	stw	r12,THREAD+THREAD_VRSAVE(r2)
657END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
658#endif /* CONFIG_ALTIVEC */
659#ifdef CONFIG_SPE
660BEGIN_FTR_SECTION
661	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
662	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
663	stw	r12,THREAD+THREAD_SPEFSCR(r2)
664END_FTR_SECTION_IFSET(CPU_FTR_SPE)
665#endif /* CONFIG_SPE */
666	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
667	beq+	1f
668	andc	r11,r11,r0
669	MTMSRD(r11)
670	isync
6711:	stw	r11,_MSR(r1)
672	mfcr	r10
673	stw	r10,_CCR(r1)
674	stw	r1,KSP(r3)	/* Set old stack pointer */
675
676#ifdef CONFIG_SMP
677	/* We need a sync somewhere here to make sure that if the
678	 * previous task gets rescheduled on another CPU, it sees all
679	 * stores it has performed on this one.
680	 */
681	sync
682#endif /* CONFIG_SMP */
683
684	tophys(r0,r4)
685	CLR_TOP32(r0)
686	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
687	lwz	r1,KSP(r4)	/* Load new stack pointer */
688
689	/* save the old current 'last' for return value */
690	mr	r3,r2
691	addi	r2,r4,-THREAD	/* Update current */
692
693#ifdef CONFIG_ALTIVEC
694BEGIN_FTR_SECTION
695	lwz	r0,THREAD+THREAD_VRSAVE(r2)
696	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
697END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
698#endif /* CONFIG_ALTIVEC */
699#ifdef CONFIG_SPE
700BEGIN_FTR_SECTION
701	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
702	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
703END_FTR_SECTION_IFSET(CPU_FTR_SPE)
704#endif /* CONFIG_SPE */
705
706	lwz	r0,_CCR(r1)
707	mtcrf	0xFF,r0
708	/* r3-r12 are destroyed -- Cort */
709	REST_NVGPRS(r1)
710
711	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
712	mtlr	r4
713	addi	r1,r1,INT_FRAME_SIZE
714	blr
715
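/*
 * Fast exception return path: r11 points to the exception frame and
 * r9/r12 hold the MSR/NIP to return with.  Only cr, lr, r1, r3-r6 and
 * r9-r12 are restored from the frame here.
 */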
716	.globl	fast_exception_return
717fast_exception_return:
718#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
719	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
720	beq	1f			/* if not, we've got problems */
721#endif
722
7232:	REST_4GPRS(3, r11)
724	lwz	r10,_CCR(r11)
725	REST_GPR(1, r11)
726	mtcr	r10
727	lwz	r10,_LINK(r11)
728	mtlr	r10
729	REST_GPR(10, r11)
730	mtspr	SPRN_SRR1,r9
731	mtspr	SPRN_SRR0,r12
732	REST_GPR(9, r11)
733	REST_GPR(12, r11)
734	lwz	r11,GPR11(r11)
735	SYNC
736	RFI
737
738#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
739/* check if the exception happened in a restartable section */
7401:	lis	r3,exc_exit_restart_end@ha
741	addi	r3,r3,exc_exit_restart_end@l
742	cmplw	r12,r3
743	bge	3f
744	lis	r4,exc_exit_restart@ha
745	addi	r4,r4,exc_exit_restart@l
746	cmplw	r12,r4
747	blt	3f
748	lis	r3,fee_restarts@ha
749	tophys(r3,r3)
750	lwz	r5,fee_restarts@l(r3)
751	addi	r5,r5,1
752	stw	r5,fee_restarts@l(r3)
753	mr	r12,r4		/* restart at exc_exit_restart */
754	b	2b
755
756	.section .bss
757	.align	2
758fee_restarts:
759	.space	4
760	.previous
761
762/* aargh, a nonrecoverable interrupt, panic */
763/* aargh, we don't know which trap this is */
764/* but the 601 doesn't implement the RI bit, so assume it's OK */
7653:
766BEGIN_FTR_SECTION
767	b	2b
768END_FTR_SECTION_IFSET(CPU_FTR_601)
769	li	r10,-1
770	stw	r10,_TRAP(r11)
771	addi	r3,r1,STACK_FRAME_OVERHEAD
772	lis	r10,MSR_KERNEL@h
773	ori	r10,r10,MSR_KERNEL@l
774	bl	transfer_to_handler_full
775	.long	nonrecoverable_exception
776	.long	ret_from_except
777#endif
778
779	.globl	ret_from_except_full
780ret_from_except_full:
781	REST_NVGPRS(r1)
782	/* fall through */
783
784	.globl	ret_from_except
785ret_from_except:
786	/* Hard-disable interrupts so that current_thread_info()->flags
787	 * can't change between when we test it and when we return
788	 * from the interrupt. */
789	/* Note: We don't bother telling lockdep about it */
790	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
791	SYNC			/* Some chip revs have problems here... */
792	MTMSRD(r10)		/* disable interrupts */
793
794	lwz	r3,_MSR(r1)	/* Returning to user mode? */
795	andi.	r0,r3,MSR_PR
796	beq	resume_kernel
797
798user_exc_return:		/* r10 contains MSR_KERNEL here */
799	/* Check current_thread_info()->flags */
800	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
801	lwz	r9,TI_FLAGS(r9)
802	andi.	r0,r9,_TIF_USER_WORK_MASK
803	bne	do_work
804
805restore_user:
806#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
807	/* Check whether this process has its own DBCR0 value.  The internal
808	   debug mode bit tells us that dbcr0 should be loaded. */
809	lwz	r0,THREAD+THREAD_DBCR0(r2)
810	andis.	r10,r0,DBCR0_IDM@h
811	bnel-	load_dbcr0
812#endif
813
814#ifdef CONFIG_PREEMPT
815	b	restore
816
817/* N.B. the only way to get here is from the beq following ret_from_except. */
818resume_kernel:
819	/* check current_thread_info->preempt_count */
820	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
821	lwz	r0,TI_PREEMPT(r9)
822	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
823	bne	restore
824	lwz	r0,TI_FLAGS(r9)
825	andi.	r0,r0,_TIF_NEED_RESCHED
826	beq+	restore
827	andi.	r0,r3,MSR_EE	/* interrupts off? */
828	beq	restore		/* don't schedule if so */
829#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, but we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
834	bl	trace_hardirqs_off
835#endif
8361:	bl	preempt_schedule_irq
837	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
838	lwz	r3,TI_FLAGS(r9)
839	andi.	r0,r3,_TIF_NEED_RESCHED
840	bne-	1b
841#ifdef CONFIG_TRACE_IRQFLAGS
842	/* And now, to properly rebalance the above, we tell lockdep they
843	 * are being turned back on, which will happen when we return
844	 */
845	bl	trace_hardirqs_on
846#endif
847#else
848resume_kernel:
849#endif /* CONFIG_PREEMPT */
850
851	/* interrupts are hard-disabled at this point */
852restore:
853#ifdef CONFIG_44x
854BEGIN_MMU_FTR_SECTION
855	b	1f
856END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
857	lis	r4,icache_44x_need_flush@ha
858	lwz	r5,icache_44x_need_flush@l(r4)
859	cmplwi	cr0,r5,0
860	beq+	1f
861	li	r6,0
862	iccci	r0,r0
863	stw	r6,icache_44x_need_flush@l(r4)
8641:
865#endif  /* CONFIG_44x */
866
867	lwz	r9,_MSR(r1)
868#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know that IRQs are temporarily turned off in this
	 * assembly code while we peek at TI_FLAGS() and such. However, we
	 * need to inform it if the exception turned interrupts off and we
	 * are about to turn them back on.
	 *
	 * The problem, sadly, is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep
	 * about turning them on here when we go back to wherever we came
	 * from with EE on, even if that means some redundant calls being
	 * tracked. Maybe later we could encode what the exception did
	 * somewhere, or test the exception type in the pt_regs, but that
	 * sounds like overkill.
	 */
881	andi.	r10,r9,MSR_EE
882	beq	1f
883	bl	trace_hardirqs_on
884	lwz	r9,_MSR(r1)
8851:
886#endif /* CONFIG_TRACE_IRQFLAGS */
887
888	lwz	r0,GPR0(r1)
889	lwz	r2,GPR2(r1)
890	REST_4GPRS(3, r1)
891	REST_2GPRS(7, r1)
892
893	lwz	r10,_XER(r1)
894	lwz	r11,_CTR(r1)
895	mtspr	SPRN_XER,r10
896	mtctr	r11
897
898	PPC405_ERR77(0,r1)
899BEGIN_FTR_SECTION
900	lwarx	r11,0,r1
901END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
902	stwcx.	r0,0,r1			/* to clear the reservation */
903
904#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
905	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
906	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
907
908	lwz	r10,_CCR(r1)
909	lwz	r11,_LINK(r1)
910	mtcrf	0xFF,r10
911	mtlr	r11
912
913	/*
914	 * Once we put values in SRR0 and SRR1, we are in a state
915	 * where exceptions are not recoverable, since taking an
916	 * exception will trash SRR0 and SRR1.  Therefore we clear the
917	 * MSR:RI bit to indicate this.  If we do take an exception,
918	 * we can't return to the point of the exception but we
919	 * can restart the exception exit path at the label
920	 * exc_exit_restart below.  -- paulus
921	 */
922	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
923	SYNC
924	MTMSRD(r10)		/* clear the RI bit */
925	.globl exc_exit_restart
926exc_exit_restart:
927	lwz	r12,_NIP(r1)
928	FIX_SRR1(r9,r10)
929	mtspr	SPRN_SRR0,r12
930	mtspr	SPRN_SRR1,r9
931	REST_4GPRS(9, r1)
932	lwz	r1,GPR1(r1)
933	.globl exc_exit_restart_end
934exc_exit_restart_end:
935	SYNC
936	RFI
937
938#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because there is no RI
	 * bit in the MSR.
	 * The TLB miss handler checks whether we have interrupted
	 * the exception exit path and restarts it if so
	 * (well, maybe one day it will... :).
	 */
946	lwz	r11,_LINK(r1)
947	mtlr	r11
948	lwz	r10,_CCR(r1)
949	mtcrf	0xff,r10
950	REST_2GPRS(9, r1)
951	.globl exc_exit_restart
952exc_exit_restart:
953	lwz	r11,_NIP(r1)
954	lwz	r12,_MSR(r1)
955exc_exit_start:
956	mtspr	SPRN_SRR0,r11
957	mtspr	SPRN_SRR1,r12
958	REST_2GPRS(11, r1)
959	lwz	r1,GPR1(r1)
960	.globl exc_exit_restart_end
961exc_exit_restart_end:
962	PPC405_ERR77_SYNC
963	rfi
964	b	.			/* prevent prefetch past rfi */
965
966/*
967 * Returning from a critical interrupt in user mode doesn't need
968 * to be any different from a normal exception.  For a critical
969 * interrupt in the kernel, we just return (without checking for
970 * preemption) since the interrupt may have happened at some crucial
971 * place (e.g. inside the TLB miss handler), and because we will be
972 * running with r1 pointing into critical_stack, not the current
973 * process's kernel stack (and therefore current_thread_info() will
974 * give the wrong answer).
975 * We have to restore various SPRs that may have been in use at the
976 * time of the critical interrupt.
977 *
978 */
979#ifdef CONFIG_40x
980#define PPC_40x_TURN_OFF_MSR_DR						    \
981	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
982	 * assume the instructions here are mapped by a pinned TLB entry */ \
983	li	r10,MSR_IR;						    \
984	mtmsr	r10;							    \
985	isync;								    \
986	tophys(r1, r1);
987#else
988#define PPC_40x_TURN_OFF_MSR_DR
989#endif
990
991#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
992	REST_NVGPRS(r1);						\
993	lwz	r3,_MSR(r1);						\
994	andi.	r3,r3,MSR_PR;						\
995	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
996	bne	user_exc_return;					\
997	lwz	r0,GPR0(r1);						\
998	lwz	r2,GPR2(r1);						\
999	REST_4GPRS(3, r1);						\
1000	REST_2GPRS(7, r1);						\
1001	lwz	r10,_XER(r1);						\
1002	lwz	r11,_CTR(r1);						\
1003	mtspr	SPRN_XER,r10;						\
1004	mtctr	r11;							\
1005	PPC405_ERR77(0,r1);						\
1006	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1007	lwz	r11,_LINK(r1);						\
1008	mtlr	r11;							\
1009	lwz	r10,_CCR(r1);						\
1010	mtcrf	0xff,r10;						\
1011	PPC_40x_TURN_OFF_MSR_DR;					\
1012	lwz	r9,_DEAR(r1);						\
1013	lwz	r10,_ESR(r1);						\
1014	mtspr	SPRN_DEAR,r9;						\
1015	mtspr	SPRN_ESR,r10;						\
1016	lwz	r11,_NIP(r1);						\
1017	lwz	r12,_MSR(r1);						\
1018	mtspr	exc_lvl_srr0,r11;					\
1019	mtspr	exc_lvl_srr1,r12;					\
1020	lwz	r9,GPR9(r1);						\
1021	lwz	r12,GPR12(r1);						\
1022	lwz	r10,GPR10(r1);						\
1023	lwz	r11,GPR11(r1);						\
1024	lwz	r1,GPR1(r1);						\
1025	PPC405_ERR77_SYNC;						\
1026	exc_lvl_rfi;							\
1027	b	.;		/* prevent prefetch past exc_lvl_rfi */
1028
1029#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1030	lwz	r9,_##exc_lvl_srr0(r1);					\
1031	lwz	r10,_##exc_lvl_srr1(r1);				\
1032	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1033	mtspr	SPRN_##exc_lvl_srr1,r10;
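/*
 * For example, RESTORE_xSRR(CSRR0,CSRR1) reloads SPRN_CSRR0/SPRN_CSRR1
 * from the _CSRR0/_CSRR1 slots saved by the transfer_to_handler code
 * above.
 */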
1034
1035#if defined(CONFIG_PPC_BOOK3E_MMU)
1036#ifdef CONFIG_PHYS_64BIT
1037#define	RESTORE_MAS7							\
1038	lwz	r11,MAS7(r1);						\
1039	mtspr	SPRN_MAS7,r11;
1040#else
1041#define	RESTORE_MAS7
1042#endif /* CONFIG_PHYS_64BIT */
1043#define RESTORE_MMU_REGS						\
1044	lwz	r9,MAS0(r1);						\
1045	lwz	r10,MAS1(r1);						\
1046	lwz	r11,MAS2(r1);						\
1047	mtspr	SPRN_MAS0,r9;						\
1048	lwz	r9,MAS3(r1);						\
1049	mtspr	SPRN_MAS1,r10;						\
1050	lwz	r10,MAS6(r1);						\
1051	mtspr	SPRN_MAS2,r11;						\
1052	mtspr	SPRN_MAS3,r9;						\
1053	mtspr	SPRN_MAS6,r10;						\
1054	RESTORE_MAS7;
1055#elif defined(CONFIG_44x)
1056#define RESTORE_MMU_REGS						\
1057	lwz	r9,MMUCR(r1);						\
1058	mtspr	SPRN_MMUCR,r9;
1059#else
1060#define RESTORE_MMU_REGS
1061#endif
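/* These mirror the MMU SPRs saved by crit_transfer_to_handler above. */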
1062
1063#ifdef CONFIG_40x
1064	.globl	ret_from_crit_exc
1065ret_from_crit_exc:
1066	mfspr	r9,SPRN_SPRG_THREAD
1067	lis	r10,saved_ksp_limit@ha;
1068	lwz	r10,saved_ksp_limit@l(r10);
1069	tovirt(r9,r9);
1070	stw	r10,KSP_LIMIT(r9)
1071	lis	r9,crit_srr0@ha;
1072	lwz	r9,crit_srr0@l(r9);
1073	lis	r10,crit_srr1@ha;
1074	lwz	r10,crit_srr1@l(r10);
1075	mtspr	SPRN_SRR0,r9;
1076	mtspr	SPRN_SRR1,r10;
1077	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1078#endif /* CONFIG_40x */
1079
1080#ifdef CONFIG_BOOKE
1081	.globl	ret_from_crit_exc
1082ret_from_crit_exc:
1083	mfspr	r9,SPRN_SPRG_THREAD
1084	lwz	r10,SAVED_KSP_LIMIT(r1)
1085	stw	r10,KSP_LIMIT(r9)
1086	RESTORE_xSRR(SRR0,SRR1);
1087	RESTORE_MMU_REGS;
1088	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1089
1090	.globl	ret_from_debug_exc
1091ret_from_debug_exc:
1092	mfspr	r9,SPRN_SPRG_THREAD
1093	lwz	r10,SAVED_KSP_LIMIT(r1)
1094	stw	r10,KSP_LIMIT(r9)
1095	lwz	r9,THREAD_INFO-THREAD(r9)
1096	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
1097	lwz	r10,TI_PREEMPT(r10)
1098	stw	r10,TI_PREEMPT(r9)
1099	RESTORE_xSRR(SRR0,SRR1);
1100	RESTORE_xSRR(CSRR0,CSRR1);
1101	RESTORE_MMU_REGS;
1102	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1103
1104	.globl	ret_from_mcheck_exc
1105ret_from_mcheck_exc:
1106	mfspr	r9,SPRN_SPRG_THREAD
1107	lwz	r10,SAVED_KSP_LIMIT(r1)
1108	stw	r10,KSP_LIMIT(r9)
1109	RESTORE_xSRR(SRR0,SRR1);
1110	RESTORE_xSRR(CSRR0,CSRR1);
1111	RESTORE_xSRR(DSRR0,DSRR1);
1112	RESTORE_MMU_REGS;
1113	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1114#endif /* CONFIG_BOOKE */
1115
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * holds the dbcr0 value to set on entry to this routine.
 */
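/*
 * global_dbcr0 (below) holds two words per CPU: the saved global DBCR0
 * value and a use count.  The count is incremented here and decremented
 * again in transfer_to_handler when the ptraced task next enters the
 * kernel and the saved global value is put back.
 */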
1121load_dbcr0:
1122	mfmsr	r10		/* first disable debug exceptions */
1123	rlwinm	r10,r10,0,~MSR_DE
1124	mtmsr	r10
1125	isync
1126	mfspr	r10,SPRN_DBCR0
1127	lis	r11,global_dbcr0@ha
1128	addi	r11,r11,global_dbcr0@l
1129#ifdef CONFIG_SMP
1130	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
1131	lwz	r9,TI_CPU(r9)
1132	slwi	r9,r9,3
1133	add	r11,r11,r9
1134#endif
1135	stw	r10,0(r11)
1136	mtspr	SPRN_DBCR0,r0
1137	lwz	r10,4(r11)
1138	addi	r10,r10,1
1139	stw	r10,4(r11)
1140	li	r11,-1
1141	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1142	blr
1143
1144	.section .bss
1145	.align	4
1146global_dbcr0:
1147	.space	8*NR_CPUS
1148	.previous
1149#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1150
1151do_work:			/* r10 contains MSR_KERNEL here */
1152	andi.	r0,r9,_TIF_NEED_RESCHED
1153	beq	do_user_signal
1154
1155do_resched:			/* r10 contains MSR_KERNEL here */
1156	/* Note: We don't need to inform lockdep that we are enabling
1157	 * interrupts here. As far as it knows, they are already enabled
1158	 */
1159	ori	r10,r10,MSR_EE
1160	SYNC
1161	MTMSRD(r10)		/* hard-enable interrupts */
1162	bl	schedule
1163recheck:
	/* Note: we don't tell lockdep that we are disabling them again
	 * either.  These disable/enable cycles, used only to peek at
	 * TI_FLAGS, aren't advertised to it.
	 */
1168	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1169	SYNC
1170	MTMSRD(r10)		/* disable interrupts */
1171	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
1172	lwz	r9,TI_FLAGS(r9)
1173	andi.	r0,r9,_TIF_NEED_RESCHED
1174	bne-	do_resched
1175	andi.	r0,r9,_TIF_USER_WORK_MASK
1176	beq	restore_user
1177do_user_signal:			/* r10 contains MSR_KERNEL here */
1178	ori	r10,r10,MSR_EE
1179	SYNC
1180	MTMSRD(r10)		/* hard-enable interrupts */
1181	/* save r13-r31 in the exception frame, if not already done */
1182	lwz	r3,_TRAP(r1)
1183	andi.	r0,r3,1
1184	beq	2f
1185	SAVE_NVGPRS(r1)
1186	rlwinm	r3,r3,0,0,30
1187	stw	r3,_TRAP(r1)
11882:	addi	r3,r1,STACK_FRAME_OVERHEAD
1189	mr	r4,r9
1190	bl	do_signal
1191	REST_NVGPRS(r1)
1192	b	recheck
1193
1194/*
1195 * We come here when we are at the end of handling an exception
1196 * that occurred at a place where taking an exception will lose
1197 * state information, such as the contents of SRR0 and SRR1.
1198 */
1199nonrecoverable:
1200	lis	r10,exc_exit_restart_end@ha
1201	addi	r10,r10,exc_exit_restart_end@l
1202	cmplw	r12,r10
1203	bge	3f
1204	lis	r11,exc_exit_restart@ha
1205	addi	r11,r11,exc_exit_restart@l
1206	cmplw	r12,r11
1207	blt	3f
1208	lis	r10,ee_restarts@ha
1209	lwz	r12,ee_restarts@l(r10)
1210	addi	r12,r12,1
1211	stw	r12,ee_restarts@l(r10)
1212	mr	r12,r11		/* restart at exc_exit_restart */
1213	blr
12143:	/* OK, we can't recover, kill this process */
1215	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1216BEGIN_FTR_SECTION
1217	blr
1218END_FTR_SECTION_IFSET(CPU_FTR_601)
1219	lwz	r3,_TRAP(r1)
1220	andi.	r0,r3,1
1221	beq	4f
1222	SAVE_NVGPRS(r1)
1223	rlwinm	r3,r3,0,0,30
1224	stw	r3,_TRAP(r1)
12254:	addi	r3,r1,STACK_FRAME_OVERHEAD
1226	bl	nonrecoverable_exception
1227	/* shouldn't return */
1228	b	4b
1229
1230	.section .bss
1231	.align	2
1232ee_restarts:
1233	.space	4
1234	.previous
1235
1236/*
1237 * PROM code for specific machines follows.  Put it
1238 * here so it's easy to add arch-specific sections later.
1239 * -- Cort
1240 */
1241#ifdef CONFIG_PPC_RTAS
1242/*
1243 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1244 * called with the MMU off.
1245 */
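/*
 * r3 is expected to hold the physical address of the RTAS argument
 * buffer (the caller, rtas_call(), passes __pa of the args); it is
 * passed through untouched to the RTAS entry point.
 */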
1246_GLOBAL(enter_rtas)
1247	stwu	r1,-INT_FRAME_SIZE(r1)
1248	mflr	r0
1249	stw	r0,INT_FRAME_SIZE+4(r1)
1250	LOAD_REG_ADDR(r4, rtas)
1251	lis	r6,1f@ha	/* physical return address for rtas */
1252	addi	r6,r6,1f@l
1253	tophys(r6,r6)
1254	tophys(r7,r1)
1255	lwz	r8,RTASENTRY(r4)
1256	lwz	r4,RTASBASE(r4)
1257	mfmsr	r9
1258	stw	r9,8(r1)
1259	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1260	SYNC			/* disable interrupts so SRR0/1 */
1261	MTMSRD(r0)		/* don't get trashed */
1262	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1263	mtlr	r6
1264	mtspr	SPRN_SPRG_RTAS,r7
1265	mtspr	SPRN_SRR0,r8
1266	mtspr	SPRN_SRR1,r9
1267	RFI
12681:	tophys(r9,r1)
1269	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1270	lwz	r9,8(r9)	/* original msr value */
1271	FIX_SRR1(r9,r0)
1272	addi	r1,r1,INT_FRAME_SIZE
1273	li	r0,0
1274	mtspr	SPRN_SPRG_RTAS,r0
1275	mtspr	SPRN_SRR0,r8
1276	mtspr	SPRN_SRR1,r9
1277	RFI			/* return to caller */
1278
1279	.globl	machine_check_in_rtas
1280machine_check_in_rtas:
1281	twi	31,0,0
1282	/* XXX load up BATs and panic */
1283
1284#endif /* CONFIG_PPC_RTAS */
1285
1286#ifdef CONFIG_FUNCTION_TRACER
1287#ifdef CONFIG_DYNAMIC_FTRACE
1288_GLOBAL(mcount)
1289_GLOBAL(_mcount)
	/*
	 * _mcount on PPC32 is required to preserve the link register.
	 * But we have r0 to play with.  We use r0 to copy the return
	 * address (which points back into mcount's caller) into the
	 * ctr register, restore the link register and then jump back
	 * using the ctr register.
	 */
1297	mflr	r0
1298	mtctr	r0
1299	lwz	r0, 4(r1)
1300	mtlr	r0
1301	bctr
1302
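/*
 * With DYNAMIC_FTRACE the _mcount stub above simply returns; the
 * mcount call sites are patched at run time to call ftrace_caller
 * below when tracing is enabled.
 */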
1303_GLOBAL(ftrace_caller)
1304	MCOUNT_SAVE_FRAME
1305	/* r3 ends up with link register */
1306	subi	r3, r3, MCOUNT_INSN_SIZE
1307.globl ftrace_call
1308ftrace_call:
1309	bl	ftrace_stub
1310	nop
1311#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1312.globl ftrace_graph_call
1313ftrace_graph_call:
1314	b	ftrace_graph_stub
1315_GLOBAL(ftrace_graph_stub)
1316#endif
1317	MCOUNT_RESTORE_FRAME
1318	/* old link register ends up in ctr reg */
1319	bctr
1320#else
1321_GLOBAL(mcount)
1322_GLOBAL(_mcount)
1323
1324	MCOUNT_SAVE_FRAME
1325
1326	subi	r3, r3, MCOUNT_INSN_SIZE
1327	LOAD_REG_ADDR(r5, ftrace_trace_function)
1328	lwz	r5,0(r5)
1329
1330	mtctr	r5
1331	bctrl
1332	nop
1333
1334#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1335	b	ftrace_graph_caller
1336#endif
1337	MCOUNT_RESTORE_FRAME
1338	bctr
1339#endif
1340
1341_GLOBAL(ftrace_stub)
1342	blr
1343
1344#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1345_GLOBAL(ftrace_graph_caller)
1346	/* load r4 with local address */
1347	lwz	r4, 44(r1)
1348	subi	r4, r4, MCOUNT_INSN_SIZE
1349
1350	/* get the parent address */
1351	addi	r3, r1, 52
1352
1353	bl	prepare_ftrace_return
1354	nop
1355
1356	MCOUNT_RESTORE_FRAME
1357	/* old link register ends up in ctr reg */
1358	bctr
1359
1360_GLOBAL(return_to_handler)
1361	/* need to save return values */
1362	stwu	r1, -32(r1)
1363	stw	r3, 20(r1)
1364	stw	r4, 16(r1)
1365	stw	r31, 12(r1)
1366	mr	r31, r1
1367
1368	bl	ftrace_return_to_handler
1369	nop
1370
1371	/* return value has real return address */
1372	mtlr	r3
1373
1374	lwz	r3, 20(r1)
1375	lwz	r4, 16(r1)
1376	lwz	r31,12(r1)
1377	lwz	r1, 0(r1)
1378
1379	/* Jump back to real return address */
1380	blr
1381#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1382
#endif /* CONFIG_FUNCTION_TRACER */
1384