xref: /linux/arch/powerpc/kernel/entry_32.S (revision 4a8e43feeac7996b8de2d5b2823e316917493df4)
1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33#include <asm/ftrace.h>
34#include <asm/ptrace.h>
35
36#undef SHOW_SYSCALLS
37#undef SHOW_SYSCALLS_TASK
38
39/*
40 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
41 */
42#if MSR_KERNEL >= 0x10000
43#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
44#else
45#define LOAD_MSR_KERNEL(r, x)	li r,(x)
46#endif
47
48#ifdef CONFIG_BOOKE
49	.globl	mcheck_transfer_to_handler
50mcheck_transfer_to_handler:
51	mfspr	r0,SPRN_DSRR0
52	stw	r0,_DSRR0(r11)
53	mfspr	r0,SPRN_DSRR1
54	stw	r0,_DSRR1(r11)
55	/* fall through */
56
57	.globl	debug_transfer_to_handler
58debug_transfer_to_handler:
59	mfspr	r0,SPRN_CSRR0
60	stw	r0,_CSRR0(r11)
61	mfspr	r0,SPRN_CSRR1
62	stw	r0,_CSRR1(r11)
63	/* fall through */
64
65	.globl	crit_transfer_to_handler
66crit_transfer_to_handler:
67#ifdef CONFIG_PPC_BOOK3E_MMU
68	mfspr	r0,SPRN_MAS0
69	stw	r0,MAS0(r11)
70	mfspr	r0,SPRN_MAS1
71	stw	r0,MAS1(r11)
72	mfspr	r0,SPRN_MAS2
73	stw	r0,MAS2(r11)
74	mfspr	r0,SPRN_MAS3
75	stw	r0,MAS3(r11)
76	mfspr	r0,SPRN_MAS6
77	stw	r0,MAS6(r11)
78#ifdef CONFIG_PHYS_64BIT
79	mfspr	r0,SPRN_MAS7
80	stw	r0,MAS7(r11)
81#endif /* CONFIG_PHYS_64BIT */
82#endif /* CONFIG_PPC_BOOK3E_MMU */
83#ifdef CONFIG_44x
84	mfspr	r0,SPRN_MMUCR
85	stw	r0,MMUCR(r11)
86#endif
87	mfspr	r0,SPRN_SRR0
88	stw	r0,_SRR0(r11)
89	mfspr	r0,SPRN_SRR1
90	stw	r0,_SRR1(r11)
91
92	/* set the stack limit to the current stack
93	 * and set the limit to protect the thread_info
94	 * struct
95	 */
96	mfspr	r8,SPRN_SPRG_THREAD
97	lwz	r0,KSP_LIMIT(r8)
98	stw	r0,SAVED_KSP_LIMIT(r11)
99	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
100	stw	r0,KSP_LIMIT(r8)
101	/* fall through */
102#endif
103
104#ifdef CONFIG_40x
105	.globl	crit_transfer_to_handler
106crit_transfer_to_handler:
107	lwz	r0,crit_r10@l(0)
108	stw	r0,GPR10(r11)
109	lwz	r0,crit_r11@l(0)
110	stw	r0,GPR11(r11)
111	mfspr	r0,SPRN_SRR0
112	stw	r0,crit_srr0@l(0)
113	mfspr	r0,SPRN_SRR1
114	stw	r0,crit_srr1@l(0)
115
116	/* set the stack limit to the current stack
117	 * and set the limit to protect the thread_info
118	 * struct
119	 */
120	mfspr	r8,SPRN_SPRG_THREAD
121	lwz	r0,KSP_LIMIT(r8)
122	stw	r0,saved_ksp_limit@l(0)
123	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
124	stw	r0,KSP_LIMIT(r8)
125	/* fall through */
126#endif
127
128/*
129 * This code finishes saving the registers to the exception frame
130 * and jumps to the appropriate handler for the exception, turning
131 * on address translation.
132 * Note that we rely on the caller having set cr0.eq iff the exception
133 * occurred in kernel mode (i.e. MSR:PR = 0).
134 */
135	.globl	transfer_to_handler_full
136transfer_to_handler_full:
137	SAVE_NVGPRS(r11)
138	/* fall through */
139
140	.globl	transfer_to_handler
141transfer_to_handler:
142	stw	r2,GPR2(r11)
143	stw	r12,_NIP(r11)
144	stw	r9,_MSR(r11)
145	andi.	r2,r9,MSR_PR
146	mfctr	r12
147	mfspr	r2,SPRN_XER
148	stw	r12,_CTR(r11)
149	stw	r2,_XER(r11)
150	mfspr	r12,SPRN_SPRG_THREAD
151	addi	r2,r12,-THREAD
152	tovirt(r2,r2)			/* set r2 to current */
153	beq	2f			/* if from user, fix up THREAD.regs */
154	addi	r11,r1,STACK_FRAME_OVERHEAD
155	stw	r11,PT_REGS(r12)
156#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
157	/* Check to see if the dbcr0 register is set up to debug.  Use the
158	   internal debug mode bit to do this. */
159	lwz	r12,THREAD_DBCR0(r12)
160	andis.	r12,r12,DBCR0_IDM@h
161	beq+	3f
162	/* From user and task is ptraced - load up global dbcr0 */
163	li	r12,-1			/* clear all pending debug events */
164	mtspr	SPRN_DBSR,r12
165	lis	r11,global_dbcr0@ha
166	tophys(r11,r11)
167	addi	r11,r11,global_dbcr0@l
168#ifdef CONFIG_SMP
169	CURRENT_THREAD_INFO(r9, r1)
170	lwz	r9,TI_CPU(r9)
171	slwi	r9,r9,3
172	add	r11,r11,r9
173#endif
174	lwz	r12,0(r11)
175	mtspr	SPRN_DBCR0,r12
176	lwz	r12,4(r11)
177	addi	r12,r12,-1
178	stw	r12,4(r11)
179#endif
180	b	3f
181
1822:	/* if from kernel, check interrupted DOZE/NAP mode and
183         * check for stack overflow
184         */
185	lwz	r9,KSP_LIMIT(r12)
186	cmplw	r1,r9			/* if r1 <= ksp_limit */
187	ble-	stack_ovf		/* then the kernel stack overflowed */
1885:
189#if defined(CONFIG_6xx) || defined(CONFIG_E500)
190	CURRENT_THREAD_INFO(r9, r1)
191	tophys(r9,r9)			/* check local flags */
192	lwz	r12,TI_LOCAL_FLAGS(r9)
193	mtcrf	0x01,r12
194	bt-	31-TLF_NAPPING,4f
195	bt-	31-TLF_SLEEPING,7f
196#endif /* CONFIG_6xx || CONFIG_E500 */
197	.globl transfer_to_handler_cont
198transfer_to_handler_cont:
1993:
200	mflr	r9
201	lwz	r11,0(r9)		/* virtual address of handler */
202	lwz	r9,4(r9)		/* where to go when done */
203#ifdef CONFIG_TRACE_IRQFLAGS
204	lis	r12,reenable_mmu@h
205	ori	r12,r12,reenable_mmu@l
206	mtspr	SPRN_SRR0,r12
207	mtspr	SPRN_SRR1,r10
208	SYNC
209	RFI
210reenable_mmu:				/* re-enable mmu so we can */
211	mfmsr	r10
212	lwz	r12,_MSR(r1)
213	xor	r10,r10,r12
214	andi.	r10,r10,MSR_EE		/* Did EE change? */
215	beq	1f
216
217	/*
218	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
219	 * If from user mode there is only one stack frame on the stack, and
220	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
221	 * stack frame to make trace_hardirqs_off happy.
222	 *
223	 * This is handy because we also need to save a bunch of GPRs,
224	 * r3 can be different from GPR3(r1) at this point, r9 and r11
225	 * contains the old MSR and handler address respectively,
226	 * r4 & r5 can contain page fault arguments that need to be passed
227	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
228	 * they aren't useful past this point (aren't syscall arguments),
229	 * the rest is restored from the exception frame.
230	 */
231	stwu	r1,-32(r1)
232	stw	r9,8(r1)
233	stw	r11,12(r1)
234	stw	r3,16(r1)
235	stw	r4,20(r1)
236	stw	r5,24(r1)
237	bl	trace_hardirqs_off
238	lwz	r5,24(r1)
239	lwz	r4,20(r1)
240	lwz	r3,16(r1)
241	lwz	r11,12(r1)
242	lwz	r9,8(r1)
243	addi	r1,r1,32
244	lwz	r0,GPR0(r1)
245	lwz	r6,GPR6(r1)
246	lwz	r7,GPR7(r1)
247	lwz	r8,GPR8(r1)
2481:	mtctr	r11
249	mtlr	r9
250	bctr				/* jump to handler */
251#else /* CONFIG_TRACE_IRQFLAGS */
252	mtspr	SPRN_SRR0,r11
253	mtspr	SPRN_SRR1,r10
254	mtlr	r9
255	SYNC
256	RFI				/* jump to handler, enable MMU */
257#endif /* CONFIG_TRACE_IRQFLAGS */
258
259#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2604:	rlwinm	r12,r12,0,~_TLF_NAPPING
261	stw	r12,TI_LOCAL_FLAGS(r9)
262	b	power_save_ppc32_restore
263
2647:	rlwinm	r12,r12,0,~_TLF_SLEEPING
265	stw	r12,TI_LOCAL_FLAGS(r9)
266	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
267	rlwinm	r9,r9,0,~MSR_EE
268	lwz	r12,_LINK(r11)		/* and return to address in LR */
269	b	fast_exception_return
270#endif
271
272/*
273 * On kernel stack overflow, load up an initial stack pointer
274 * and call StackOverflow(regs), which should not return.
275 */
276stack_ovf:
277	/* sometimes we use a statically-allocated stack, which is OK. */
278	lis	r12,_end@h
279	ori	r12,r12,_end@l
280	cmplw	r1,r12
281	ble	5b			/* r1 <= &_end is OK */
282	SAVE_NVGPRS(r11)
283	addi	r3,r1,STACK_FRAME_OVERHEAD
284	lis	r1,init_thread_union@ha
285	addi	r1,r1,init_thread_union@l
286	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
287	lis	r9,StackOverflow@ha
288	addi	r9,r9,StackOverflow@l
289	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
290	FIX_SRR1(r10,r12)
291	mtspr	SPRN_SRR0,r9
292	mtspr	SPRN_SRR1,r10
293	SYNC
294	RFI
295
296/*
297 * Handle a system call.
298 */
299	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
300	.stabs	"entry_32.S",N_SO,0,0,0f
3010:
302
303_GLOBAL(DoSyscall)
304	stw	r3,ORIG_GPR3(r1)
305	li	r12,0
306	stw	r12,RESULT(r1)
307	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
308	rlwinm	r11,r11,0,4,2
309	stw	r11,_CCR(r1)
310#ifdef SHOW_SYSCALLS
311	bl	do_show_syscall
312#endif /* SHOW_SYSCALLS */
313#ifdef CONFIG_TRACE_IRQFLAGS
314	/* Return from syscalls can (and generally will) hard enable
315	 * interrupts. You aren't supposed to call a syscall with
316	 * interrupts disabled in the first place. However, to ensure
317	 * that we get it right vs. lockdep if it happens, we force
318	 * that hard enable here with appropriate tracing if we see
319	 * that we have been called with interrupts off
320	 */
321	mfmsr	r11
322	andi.	r12,r11,MSR_EE
323	bne+	1f
324	/* We came in with interrupts disabled, we enable them now */
325	bl	trace_hardirqs_on
326	mfmsr	r11
327	lwz	r0,GPR0(r1)
328	lwz	r3,GPR3(r1)
329	lwz	r4,GPR4(r1)
330	ori	r11,r11,MSR_EE
331	lwz	r5,GPR5(r1)
332	lwz	r6,GPR6(r1)
333	lwz	r7,GPR7(r1)
334	lwz	r8,GPR8(r1)
335	mtmsr	r11
3361:
337#endif /* CONFIG_TRACE_IRQFLAGS */
338	CURRENT_THREAD_INFO(r10, r1)
339	lwz	r11,TI_FLAGS(r10)
340	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
341	bne-	syscall_dotrace
342syscall_dotrace_cont:
343	cmplwi	0,r0,NR_syscalls
344	lis	r10,sys_call_table@h
345	ori	r10,r10,sys_call_table@l
346	slwi	r0,r0,2
347	bge-	66f
348	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
349	mtlr	r10
350	addi	r9,r1,STACK_FRAME_OVERHEAD
351	PPC440EP_ERR42
352	blrl			/* Call handler */
353	.globl	ret_from_syscall
354ret_from_syscall:
355#ifdef SHOW_SYSCALLS
356	bl	do_show_syscall_exit
357#endif
358	mr	r6,r3
359	CURRENT_THREAD_INFO(r12, r1)
360	/* disable interrupts so current_thread_info()->flags can't change */
361	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
362	/* Note: We don't bother telling lockdep about it */
363	SYNC
364	MTMSRD(r10)
365	lwz	r9,TI_FLAGS(r12)
366	li	r8,-_LAST_ERRNO
367	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
368	bne-	syscall_exit_work
369	cmplw	0,r3,r8
370	blt+	syscall_exit_cont
371	lwz	r11,_CCR(r1)			/* Load CR */
372	neg	r3,r3
373	oris	r11,r11,0x1000	/* Set SO bit in CR */
374	stw	r11,_CCR(r1)
375syscall_exit_cont:
376	lwz	r8,_MSR(r1)
377#ifdef CONFIG_TRACE_IRQFLAGS
378	/* If we are going to return from the syscall with interrupts
379	 * off, we trace that here. It shouldn't happen though but we
380	 * want to catch the bugger if it does right ?
381	 */
382	andi.	r10,r8,MSR_EE
383	bne+	1f
384	stw	r3,GPR3(r1)
385	bl      trace_hardirqs_off
386	lwz	r3,GPR3(r1)
3871:
388#endif /* CONFIG_TRACE_IRQFLAGS */
389#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
390	/* If the process has its own DBCR0 value, load it up.  The internal
391	   debug mode bit tells us that dbcr0 should be loaded. */
392	lwz	r0,THREAD+THREAD_DBCR0(r2)
393	andis.	r10,r0,DBCR0_IDM@h
394	bnel-	load_dbcr0
395#endif
396#ifdef CONFIG_44x
397BEGIN_MMU_FTR_SECTION
398	lis	r4,icache_44x_need_flush@ha
399	lwz	r5,icache_44x_need_flush@l(r4)
400	cmplwi	cr0,r5,0
401	bne-	2f
4021:
403END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
404#endif /* CONFIG_44x */
405BEGIN_FTR_SECTION
406	lwarx	r7,0,r1
407END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
408	stwcx.	r0,0,r1			/* to clear the reservation */
409	lwz	r4,_LINK(r1)
410	lwz	r5,_CCR(r1)
411	mtlr	r4
412	mtcr	r5
413	lwz	r7,_NIP(r1)
414	FIX_SRR1(r8, r0)
415	lwz	r2,GPR2(r1)
416	lwz	r1,GPR1(r1)
417	mtspr	SPRN_SRR0,r7
418	mtspr	SPRN_SRR1,r8
419	SYNC
420	RFI
421#ifdef CONFIG_44x
4222:	li	r7,0
423	iccci	r0,r0
424	stw	r7,icache_44x_need_flush@l(r4)
425	b	1b
426#endif  /* CONFIG_44x */
427
42866:	li	r3,-ENOSYS
429	b	ret_from_syscall
430
431	.globl	ret_from_fork
432ret_from_fork:
433	REST_NVGPRS(r1)
434	bl	schedule_tail
435	li	r3,0
436	b	ret_from_syscall
437
438/* Traced system call support */
439syscall_dotrace:
440	SAVE_NVGPRS(r1)
441	li	r0,0xc00
442	stw	r0,_TRAP(r1)
443	addi	r3,r1,STACK_FRAME_OVERHEAD
444	bl	do_syscall_trace_enter
445	/*
446	 * Restore argument registers possibly just changed.
447	 * We use the return value of do_syscall_trace_enter
448	 * for call number to look up in the table (r0).
449	 */
450	mr	r0,r3
451	lwz	r3,GPR3(r1)
452	lwz	r4,GPR4(r1)
453	lwz	r5,GPR5(r1)
454	lwz	r6,GPR6(r1)
455	lwz	r7,GPR7(r1)
456	lwz	r8,GPR8(r1)
457	REST_NVGPRS(r1)
458	b	syscall_dotrace_cont
459
460syscall_exit_work:
461	andi.	r0,r9,_TIF_RESTOREALL
462	beq+	0f
463	REST_NVGPRS(r1)
464	b	2f
4650:	cmplw	0,r3,r8
466	blt+	1f
467	andi.	r0,r9,_TIF_NOERROR
468	bne-	1f
469	lwz	r11,_CCR(r1)			/* Load CR */
470	neg	r3,r3
471	oris	r11,r11,0x1000	/* Set SO bit in CR */
472	stw	r11,_CCR(r1)
473
4741:	stw	r6,RESULT(r1)	/* Save result */
475	stw	r3,GPR3(r1)	/* Update return value */
4762:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
477	beq	4f
478
479	/* Clear per-syscall TIF flags if any are set.  */
480
481	li	r11,_TIF_PERSYSCALL_MASK
482	addi	r12,r12,TI_FLAGS
4833:	lwarx	r8,0,r12
484	andc	r8,r8,r11
485#ifdef CONFIG_IBM405_ERR77
486	dcbt	0,r12
487#endif
488	stwcx.	r8,0,r12
489	bne-	3b
490	subi	r12,r12,TI_FLAGS
491
4924:	/* Anything which requires enabling interrupts? */
493	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
494	beq	ret_from_except
495
496	/* Re-enable interrupts. There is no need to trace that with
497	 * lockdep as we are supposed to have IRQs on at this point
498	 */
499	ori	r10,r10,MSR_EE
500	SYNC
501	MTMSRD(r10)
502
503	/* Save NVGPRS if they're not saved already */
504	lwz	r4,_TRAP(r1)
505	andi.	r4,r4,1
506	beq	5f
507	SAVE_NVGPRS(r1)
508	li	r4,0xc00
509	stw	r4,_TRAP(r1)
5105:
511	addi	r3,r1,STACK_FRAME_OVERHEAD
512	bl	do_syscall_trace_leave
513	b	ret_from_except_full
514
515#ifdef SHOW_SYSCALLS
516do_show_syscall:
517#ifdef SHOW_SYSCALLS_TASK
518	lis	r11,show_syscalls_task@ha
519	lwz	r11,show_syscalls_task@l(r11)
520	cmp	0,r2,r11
521	bnelr
522#endif
523	stw	r31,GPR31(r1)
524	mflr	r31
525	lis	r3,7f@ha
526	addi	r3,r3,7f@l
527	lwz	r4,GPR0(r1)
528	lwz	r5,GPR3(r1)
529	lwz	r6,GPR4(r1)
530	lwz	r7,GPR5(r1)
531	lwz	r8,GPR6(r1)
532	lwz	r9,GPR7(r1)
533	bl	printk
534	lis	r3,77f@ha
535	addi	r3,r3,77f@l
536	lwz	r4,GPR8(r1)
537	mr	r5,r2
538	bl	printk
539	lwz	r0,GPR0(r1)
540	lwz	r3,GPR3(r1)
541	lwz	r4,GPR4(r1)
542	lwz	r5,GPR5(r1)
543	lwz	r6,GPR6(r1)
544	lwz	r7,GPR7(r1)
545	lwz	r8,GPR8(r1)
546	mtlr	r31
547	lwz	r31,GPR31(r1)
548	blr
549
550do_show_syscall_exit:
551#ifdef SHOW_SYSCALLS_TASK
552	lis	r11,show_syscalls_task@ha
553	lwz	r11,show_syscalls_task@l(r11)
554	cmp	0,r2,r11
555	bnelr
556#endif
557	stw	r31,GPR31(r1)
558	mflr	r31
559	stw	r3,RESULT(r1)	/* Save result */
560	mr	r4,r3
561	lis	r3,79f@ha
562	addi	r3,r3,79f@l
563	bl	printk
564	lwz	r3,RESULT(r1)
565	mtlr	r31
566	lwz	r31,GPR31(r1)
567	blr
568
5697:	.string	"syscall %d(%x, %x, %x, %x, %x, "
57077:	.string	"%x), current=%p\n"
57179:	.string	" -> %x\n"
572	.align	2,0
573
574#ifdef SHOW_SYSCALLS_TASK
575	.data
576	.globl	show_syscalls_task
577show_syscalls_task:
578	.long	-1
579	.text
580#endif
581#endif /* SHOW_SYSCALLS */
582
583/*
584 * The fork/clone functions need to copy the full register set into
585 * the child process. Therefore we need to save all the nonvolatile
586 * registers (r13 - r31) before calling the C code.
587 */
588	.globl	ppc_fork
589ppc_fork:
590	SAVE_NVGPRS(r1)
591	lwz	r0,_TRAP(r1)
592	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
593	stw	r0,_TRAP(r1)		/* register set saved */
594	b	sys_fork
595
596	.globl	ppc_vfork
597ppc_vfork:
598	SAVE_NVGPRS(r1)
599	lwz	r0,_TRAP(r1)
600	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
601	stw	r0,_TRAP(r1)		/* register set saved */
602	b	sys_vfork
603
604	.globl	ppc_clone
605ppc_clone:
606	SAVE_NVGPRS(r1)
607	lwz	r0,_TRAP(r1)
608	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
609	stw	r0,_TRAP(r1)		/* register set saved */
610	b	sys_clone
611
612	.globl	ppc_swapcontext
613ppc_swapcontext:
614	SAVE_NVGPRS(r1)
615	lwz	r0,_TRAP(r1)
616	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
617	stw	r0,_TRAP(r1)		/* register set saved */
618	b	sys_swapcontext
619
620/*
621 * Top-level page fault handling.
622 * This is in assembler because if do_page_fault tells us that
623 * it is a bad kernel page fault, we want to save the non-volatile
624 * registers before calling bad_page_fault.
625 */
626	.globl	handle_page_fault
627handle_page_fault:
628	stw	r4,_DAR(r1)
629	addi	r3,r1,STACK_FRAME_OVERHEAD
630	bl	do_page_fault
631	cmpwi	r3,0
632	beq+	ret_from_except
633	SAVE_NVGPRS(r1)
634	lwz	r0,_TRAP(r1)
635	clrrwi	r0,r0,1
636	stw	r0,_TRAP(r1)
637	mr	r5,r3
638	addi	r3,r1,STACK_FRAME_OVERHEAD
639	lwz	r4,_DAR(r1)
640	bl	bad_page_fault
641	b	ret_from_except_full
642
643/*
644 * This routine switches between two different tasks.  The process
645 * state of one is saved on its kernel stack.  Then the state
646 * of the other is restored from its kernel stack.  The memory
647 * management hardware is updated to the second process's state.
648 * Finally, we can return to the second process.
649 * On entry, r3 points to the THREAD for the current task, r4
650 * points to the THREAD for the new task.
651 *
652 * This routine is always called with interrupts disabled.
653 *
654 * Note: there are two ways to get to the "going out" portion
655 * of this code; either by coming in via the entry (_switch)
656 * or via "fork" which must set up an environment equivalent
657 * to the "_switch" path.  If you change this , you'll have to
658 * change the fork code also.
659 *
660 * The code which creates the new task context is in 'copy_thread'
661 * in arch/ppc/kernel/process.c
662 */
663_GLOBAL(_switch)
664	stwu	r1,-INT_FRAME_SIZE(r1)
665	mflr	r0
666	stw	r0,INT_FRAME_SIZE+4(r1)
667	/* r3-r12 are caller saved -- Cort */
668	SAVE_NVGPRS(r1)
669	stw	r0,_NIP(r1)	/* Return to switch caller */
670	mfmsr	r11
671	li	r0,MSR_FP	/* Disable floating-point */
672#ifdef CONFIG_ALTIVEC
673BEGIN_FTR_SECTION
674	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
675	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
676	stw	r12,THREAD+THREAD_VRSAVE(r2)
677END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
678#endif /* CONFIG_ALTIVEC */
679#ifdef CONFIG_SPE
680BEGIN_FTR_SECTION
681	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
682	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
683	stw	r12,THREAD+THREAD_SPEFSCR(r2)
684END_FTR_SECTION_IFSET(CPU_FTR_SPE)
685#endif /* CONFIG_SPE */
686	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
687	beq+	1f
688	andc	r11,r11,r0
689	MTMSRD(r11)
690	isync
6911:	stw	r11,_MSR(r1)
692	mfcr	r10
693	stw	r10,_CCR(r1)
694	stw	r1,KSP(r3)	/* Set old stack pointer */
695
696#ifdef CONFIG_SMP
697	/* We need a sync somewhere here to make sure that if the
698	 * previous task gets rescheduled on another CPU, it sees all
699	 * stores it has performed on this one.
700	 */
701	sync
702#endif /* CONFIG_SMP */
703
704	tophys(r0,r4)
705	CLR_TOP32(r0)
706	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
707	lwz	r1,KSP(r4)	/* Load new stack pointer */
708
709	/* save the old current 'last' for return value */
710	mr	r3,r2
711	addi	r2,r4,-THREAD	/* Update current */
712
713#ifdef CONFIG_ALTIVEC
714BEGIN_FTR_SECTION
715	lwz	r0,THREAD+THREAD_VRSAVE(r2)
716	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
717END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
718#endif /* CONFIG_ALTIVEC */
719#ifdef CONFIG_SPE
720BEGIN_FTR_SECTION
721	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
722	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
723END_FTR_SECTION_IFSET(CPU_FTR_SPE)
724#endif /* CONFIG_SPE */
725
726	lwz	r0,_CCR(r1)
727	mtcrf	0xFF,r0
728	/* r3-r12 are destroyed -- Cort */
729	REST_NVGPRS(r1)
730
731	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
732	mtlr	r4
733	addi	r1,r1,INT_FRAME_SIZE
734	blr
735
736	.globl	fast_exception_return
737fast_exception_return:
738#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
739	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
740	beq	1f			/* if not, we've got problems */
741#endif
742
7432:	REST_4GPRS(3, r11)
744	lwz	r10,_CCR(r11)
745	REST_GPR(1, r11)
746	mtcr	r10
747	lwz	r10,_LINK(r11)
748	mtlr	r10
749	REST_GPR(10, r11)
750	mtspr	SPRN_SRR1,r9
751	mtspr	SPRN_SRR0,r12
752	REST_GPR(9, r11)
753	REST_GPR(12, r11)
754	lwz	r11,GPR11(r11)
755	SYNC
756	RFI
757
758#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
759/* check if the exception happened in a restartable section */
7601:	lis	r3,exc_exit_restart_end@ha
761	addi	r3,r3,exc_exit_restart_end@l
762	cmplw	r12,r3
763	bge	3f
764	lis	r4,exc_exit_restart@ha
765	addi	r4,r4,exc_exit_restart@l
766	cmplw	r12,r4
767	blt	3f
768	lis	r3,fee_restarts@ha
769	tophys(r3,r3)
770	lwz	r5,fee_restarts@l(r3)
771	addi	r5,r5,1
772	stw	r5,fee_restarts@l(r3)
773	mr	r12,r4		/* restart at exc_exit_restart */
774	b	2b
775
776	.section .bss
777	.align	2
778fee_restarts:
779	.space	4
780	.previous
781
782/* aargh, a nonrecoverable interrupt, panic */
783/* aargh, we don't know which trap this is */
784/* but the 601 doesn't implement the RI bit, so assume it's OK */
7853:
786BEGIN_FTR_SECTION
787	b	2b
788END_FTR_SECTION_IFSET(CPU_FTR_601)
789	li	r10,-1
790	stw	r10,_TRAP(r11)
791	addi	r3,r1,STACK_FRAME_OVERHEAD
792	lis	r10,MSR_KERNEL@h
793	ori	r10,r10,MSR_KERNEL@l
794	bl	transfer_to_handler_full
795	.long	nonrecoverable_exception
796	.long	ret_from_except
797#endif
798
799	.globl	ret_from_except_full
800ret_from_except_full:
801	REST_NVGPRS(r1)
802	/* fall through */
803
804	.globl	ret_from_except
805ret_from_except:
806	/* Hard-disable interrupts so that current_thread_info()->flags
807	 * can't change between when we test it and when we return
808	 * from the interrupt. */
809	/* Note: We don't bother telling lockdep about it */
810	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
811	SYNC			/* Some chip revs have problems here... */
812	MTMSRD(r10)		/* disable interrupts */
813
814	lwz	r3,_MSR(r1)	/* Returning to user mode? */
815	andi.	r0,r3,MSR_PR
816	beq	resume_kernel
817
818user_exc_return:		/* r10 contains MSR_KERNEL here */
819	/* Check current_thread_info()->flags */
820	CURRENT_THREAD_INFO(r9, r1)
821	lwz	r9,TI_FLAGS(r9)
822	andi.	r0,r9,_TIF_USER_WORK_MASK
823	bne	do_work
824
825restore_user:
826#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
827	/* Check whether this process has its own DBCR0 value.  The internal
828	   debug mode bit tells us that dbcr0 should be loaded. */
829	lwz	r0,THREAD+THREAD_DBCR0(r2)
830	andis.	r10,r0,DBCR0_IDM@h
831	bnel-	load_dbcr0
832#endif
833
834	b	restore
835
836/* N.B. the only way to get here is from the beq following ret_from_except. */
837resume_kernel:
838	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
839	CURRENT_THREAD_INFO(r9, r1)
840	lwz	r8,TI_FLAGS(r9)
841	andis.	r8,r8,_TIF_EMULATE_STACK_STORE@h
842	beq+	1f
843
844	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
845
846	lwz	r3,GPR1(r1)
847	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
848	mr	r4,r1			/* src:  current exception frame */
849	mr	r1,r3			/* Reroute the trampoline frame to r1 */
850
851	/* Copy from the original to the trampoline. */
852	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
853	li	r6,0			/* start offset: 0 */
854	mtctr	r5
8552:	lwzx	r0,r6,r4
856	stwx	r0,r6,r3
857	addi	r6,r6,4
858	bdnz	2b
859
860	/* Do real store operation to complete stwu */
861	lwz	r5,GPR1(r1)
862	stw	r8,0(r5)
863
864	/* Clear _TIF_EMULATE_STACK_STORE flag */
865	lis	r11,_TIF_EMULATE_STACK_STORE@h
866	addi	r5,r9,TI_FLAGS
8670:	lwarx	r8,0,r5
868	andc	r8,r8,r11
869#ifdef CONFIG_IBM405_ERR77
870	dcbt	0,r5
871#endif
872	stwcx.	r8,0,r5
873	bne-	0b
8741:
875
876#ifdef CONFIG_PREEMPT
877	/* check current_thread_info->preempt_count */
878	lwz	r0,TI_PREEMPT(r9)
879	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
880	bne	restore
881	andi.	r8,r8,_TIF_NEED_RESCHED
882	beq+	restore
883	lwz	r3,_MSR(r1)
884	andi.	r0,r3,MSR_EE	/* interrupts off? */
885	beq	restore		/* don't schedule if so */
886#ifdef CONFIG_TRACE_IRQFLAGS
887	/* Lockdep thinks irqs are enabled, we need to call
888	 * preempt_schedule_irq with IRQs off, so we inform lockdep
889	 * now that we -did- turn them off already
890	 */
891	bl	trace_hardirqs_off
892#endif
8931:	bl	preempt_schedule_irq
894	CURRENT_THREAD_INFO(r9, r1)
895	lwz	r3,TI_FLAGS(r9)
896	andi.	r0,r3,_TIF_NEED_RESCHED
897	bne-	1b
898#ifdef CONFIG_TRACE_IRQFLAGS
899	/* And now, to properly rebalance the above, we tell lockdep they
900	 * are being turned back on, which will happen when we return
901	 */
902	bl	trace_hardirqs_on
903#endif
904#endif /* CONFIG_PREEMPT */
905
906	/* interrupts are hard-disabled at this point */
907restore:
908#ifdef CONFIG_44x
909BEGIN_MMU_FTR_SECTION
910	b	1f
911END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
912	lis	r4,icache_44x_need_flush@ha
913	lwz	r5,icache_44x_need_flush@l(r4)
914	cmplwi	cr0,r5,0
915	beq+	1f
916	li	r6,0
917	iccci	r0,r0
918	stw	r6,icache_44x_need_flush@l(r4)
9191:
920#endif  /* CONFIG_44x */
921
922	lwz	r9,_MSR(r1)
923#ifdef CONFIG_TRACE_IRQFLAGS
924	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
925	 * off in this assembly code while peeking at TI_FLAGS() and such. However
926	 * we need to inform it if the exception turned interrupts off, and we
927	 * are about to trun them back on.
928	 *
929	 * The problem here sadly is that we don't know whether the exceptions was
930	 * one that turned interrupts off or not. So we always tell lockdep about
931	 * turning them on here when we go back to wherever we came from with EE
932	 * on, even if that may meen some redudant calls being tracked. Maybe later
933	 * we could encode what the exception did somewhere or test the exception
934	 * type in the pt_regs but that sounds overkill
935	 */
936	andi.	r10,r9,MSR_EE
937	beq	1f
938	/*
939	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
940	 * which is the stack frame here, we need to force a stack frame
941	 * in case we came from user space.
942	 */
943	stwu	r1,-32(r1)
944	mflr	r0
945	stw	r0,4(r1)
946	stwu	r1,-32(r1)
947	bl	trace_hardirqs_on
948	lwz	r1,0(r1)
949	lwz	r1,0(r1)
950	lwz	r9,_MSR(r1)
9511:
952#endif /* CONFIG_TRACE_IRQFLAGS */
953
954	lwz	r0,GPR0(r1)
955	lwz	r2,GPR2(r1)
956	REST_4GPRS(3, r1)
957	REST_2GPRS(7, r1)
958
959	lwz	r10,_XER(r1)
960	lwz	r11,_CTR(r1)
961	mtspr	SPRN_XER,r10
962	mtctr	r11
963
964	PPC405_ERR77(0,r1)
965BEGIN_FTR_SECTION
966	lwarx	r11,0,r1
967END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
968	stwcx.	r0,0,r1			/* to clear the reservation */
969
970#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
971	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
972	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
973
974	lwz	r10,_CCR(r1)
975	lwz	r11,_LINK(r1)
976	mtcrf	0xFF,r10
977	mtlr	r11
978
979	/*
980	 * Once we put values in SRR0 and SRR1, we are in a state
981	 * where exceptions are not recoverable, since taking an
982	 * exception will trash SRR0 and SRR1.  Therefore we clear the
983	 * MSR:RI bit to indicate this.  If we do take an exception,
984	 * we can't return to the point of the exception but we
985	 * can restart the exception exit path at the label
986	 * exc_exit_restart below.  -- paulus
987	 */
988	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
989	SYNC
990	MTMSRD(r10)		/* clear the RI bit */
991	.globl exc_exit_restart
992exc_exit_restart:
993	lwz	r12,_NIP(r1)
994	FIX_SRR1(r9,r10)
995	mtspr	SPRN_SRR0,r12
996	mtspr	SPRN_SRR1,r9
997	REST_4GPRS(9, r1)
998	lwz	r1,GPR1(r1)
999	.globl exc_exit_restart_end
1000exc_exit_restart_end:
1001	SYNC
1002	RFI
1003
1004#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1005	/*
1006	 * This is a bit different on 4xx/Book-E because it doesn't have
1007	 * the RI bit in the MSR.
1008	 * The TLB miss handler checks if we have interrupted
1009	 * the exception exit path and restarts it if so
1010	 * (well maybe one day it will... :).
1011	 */
1012	lwz	r11,_LINK(r1)
1013	mtlr	r11
1014	lwz	r10,_CCR(r1)
1015	mtcrf	0xff,r10
1016	REST_2GPRS(9, r1)
1017	.globl exc_exit_restart
1018exc_exit_restart:
1019	lwz	r11,_NIP(r1)
1020	lwz	r12,_MSR(r1)
1021exc_exit_start:
1022	mtspr	SPRN_SRR0,r11
1023	mtspr	SPRN_SRR1,r12
1024	REST_2GPRS(11, r1)
1025	lwz	r1,GPR1(r1)
1026	.globl exc_exit_restart_end
1027exc_exit_restart_end:
1028	PPC405_ERR77_SYNC
1029	rfi
1030	b	.			/* prevent prefetch past rfi */
1031
1032/*
1033 * Returning from a critical interrupt in user mode doesn't need
1034 * to be any different from a normal exception.  For a critical
1035 * interrupt in the kernel, we just return (without checking for
1036 * preemption) since the interrupt may have happened at some crucial
1037 * place (e.g. inside the TLB miss handler), and because we will be
1038 * running with r1 pointing into critical_stack, not the current
1039 * process's kernel stack (and therefore current_thread_info() will
1040 * give the wrong answer).
1041 * We have to restore various SPRs that may have been in use at the
1042 * time of the critical interrupt.
1043 *
1044 */
1045#ifdef CONFIG_40x
1046#define PPC_40x_TURN_OFF_MSR_DR						    \
1047	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1048	 * assume the instructions here are mapped by a pinned TLB entry */ \
1049	li	r10,MSR_IR;						    \
1050	mtmsr	r10;							    \
1051	isync;								    \
1052	tophys(r1, r1);
1053#else
1054#define PPC_40x_TURN_OFF_MSR_DR
1055#endif
1056
1057#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1058	REST_NVGPRS(r1);						\
1059	lwz	r3,_MSR(r1);						\
1060	andi.	r3,r3,MSR_PR;						\
1061	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1062	bne	user_exc_return;					\
1063	lwz	r0,GPR0(r1);						\
1064	lwz	r2,GPR2(r1);						\
1065	REST_4GPRS(3, r1);						\
1066	REST_2GPRS(7, r1);						\
1067	lwz	r10,_XER(r1);						\
1068	lwz	r11,_CTR(r1);						\
1069	mtspr	SPRN_XER,r10;						\
1070	mtctr	r11;							\
1071	PPC405_ERR77(0,r1);						\
1072	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1073	lwz	r11,_LINK(r1);						\
1074	mtlr	r11;							\
1075	lwz	r10,_CCR(r1);						\
1076	mtcrf	0xff,r10;						\
1077	PPC_40x_TURN_OFF_MSR_DR;					\
1078	lwz	r9,_DEAR(r1);						\
1079	lwz	r10,_ESR(r1);						\
1080	mtspr	SPRN_DEAR,r9;						\
1081	mtspr	SPRN_ESR,r10;						\
1082	lwz	r11,_NIP(r1);						\
1083	lwz	r12,_MSR(r1);						\
1084	mtspr	exc_lvl_srr0,r11;					\
1085	mtspr	exc_lvl_srr1,r12;					\
1086	lwz	r9,GPR9(r1);						\
1087	lwz	r12,GPR12(r1);						\
1088	lwz	r10,GPR10(r1);						\
1089	lwz	r11,GPR11(r1);						\
1090	lwz	r1,GPR1(r1);						\
1091	PPC405_ERR77_SYNC;						\
1092	exc_lvl_rfi;							\
1093	b	.;		/* prevent prefetch past exc_lvl_rfi */
1094
1095#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1096	lwz	r9,_##exc_lvl_srr0(r1);					\
1097	lwz	r10,_##exc_lvl_srr1(r1);				\
1098	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1099	mtspr	SPRN_##exc_lvl_srr1,r10;
1100
1101#if defined(CONFIG_PPC_BOOK3E_MMU)
1102#ifdef CONFIG_PHYS_64BIT
1103#define	RESTORE_MAS7							\
1104	lwz	r11,MAS7(r1);						\
1105	mtspr	SPRN_MAS7,r11;
1106#else
1107#define	RESTORE_MAS7
1108#endif /* CONFIG_PHYS_64BIT */
1109#define RESTORE_MMU_REGS						\
1110	lwz	r9,MAS0(r1);						\
1111	lwz	r10,MAS1(r1);						\
1112	lwz	r11,MAS2(r1);						\
1113	mtspr	SPRN_MAS0,r9;						\
1114	lwz	r9,MAS3(r1);						\
1115	mtspr	SPRN_MAS1,r10;						\
1116	lwz	r10,MAS6(r1);						\
1117	mtspr	SPRN_MAS2,r11;						\
1118	mtspr	SPRN_MAS3,r9;						\
1119	mtspr	SPRN_MAS6,r10;						\
1120	RESTORE_MAS7;
1121#elif defined(CONFIG_44x)
1122#define RESTORE_MMU_REGS						\
1123	lwz	r9,MMUCR(r1);						\
1124	mtspr	SPRN_MMUCR,r9;
1125#else
1126#define RESTORE_MMU_REGS
1127#endif
1128
1129#ifdef CONFIG_40x
1130	.globl	ret_from_crit_exc
1131ret_from_crit_exc:
1132	mfspr	r9,SPRN_SPRG_THREAD
1133	lis	r10,saved_ksp_limit@ha;
1134	lwz	r10,saved_ksp_limit@l(r10);
1135	tovirt(r9,r9);
1136	stw	r10,KSP_LIMIT(r9)
1137	lis	r9,crit_srr0@ha;
1138	lwz	r9,crit_srr0@l(r9);
1139	lis	r10,crit_srr1@ha;
1140	lwz	r10,crit_srr1@l(r10);
1141	mtspr	SPRN_SRR0,r9;
1142	mtspr	SPRN_SRR1,r10;
1143	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1144#endif /* CONFIG_40x */
1145
1146#ifdef CONFIG_BOOKE
1147	.globl	ret_from_crit_exc
1148ret_from_crit_exc:
1149	mfspr	r9,SPRN_SPRG_THREAD
1150	lwz	r10,SAVED_KSP_LIMIT(r1)
1151	stw	r10,KSP_LIMIT(r9)
1152	RESTORE_xSRR(SRR0,SRR1);
1153	RESTORE_MMU_REGS;
1154	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1155
1156	.globl	ret_from_debug_exc
1157ret_from_debug_exc:
1158	mfspr	r9,SPRN_SPRG_THREAD
1159	lwz	r10,SAVED_KSP_LIMIT(r1)
1160	stw	r10,KSP_LIMIT(r9)
1161	lwz	r9,THREAD_INFO-THREAD(r9)
1162	CURRENT_THREAD_INFO(r10, r1)
1163	lwz	r10,TI_PREEMPT(r10)
1164	stw	r10,TI_PREEMPT(r9)
1165	RESTORE_xSRR(SRR0,SRR1);
1166	RESTORE_xSRR(CSRR0,CSRR1);
1167	RESTORE_MMU_REGS;
1168	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1169
1170	.globl	ret_from_mcheck_exc
1171ret_from_mcheck_exc:
1172	mfspr	r9,SPRN_SPRG_THREAD
1173	lwz	r10,SAVED_KSP_LIMIT(r1)
1174	stw	r10,KSP_LIMIT(r9)
1175	RESTORE_xSRR(SRR0,SRR1);
1176	RESTORE_xSRR(CSRR0,CSRR1);
1177	RESTORE_xSRR(DSRR0,DSRR1);
1178	RESTORE_MMU_REGS;
1179	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1180#endif /* CONFIG_BOOKE */
1181
1182/*
1183 * Load the DBCR0 value for a task that is being ptraced,
1184 * having first saved away the global DBCR0.  Note that r0
1185 * has the dbcr0 value to set upon entry to this.
1186 */
1187load_dbcr0:
1188	mfmsr	r10		/* first disable debug exceptions */
1189	rlwinm	r10,r10,0,~MSR_DE
1190	mtmsr	r10
1191	isync
1192	mfspr	r10,SPRN_DBCR0
1193	lis	r11,global_dbcr0@ha
1194	addi	r11,r11,global_dbcr0@l
1195#ifdef CONFIG_SMP
1196	CURRENT_THREAD_INFO(r9, r1)
1197	lwz	r9,TI_CPU(r9)
1198	slwi	r9,r9,3
1199	add	r11,r11,r9
1200#endif
1201	stw	r10,0(r11)
1202	mtspr	SPRN_DBCR0,r0
1203	lwz	r10,4(r11)
1204	addi	r10,r10,1
1205	stw	r10,4(r11)
1206	li	r11,-1
1207	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1208	blr
1209
1210	.section .bss
1211	.align	4
1212global_dbcr0:
1213	.space	8*NR_CPUS
1214	.previous
1215#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1216
1217do_work:			/* r10 contains MSR_KERNEL here */
1218	andi.	r0,r9,_TIF_NEED_RESCHED
1219	beq	do_user_signal
1220
1221do_resched:			/* r10 contains MSR_KERNEL here */
1222	/* Note: We don't need to inform lockdep that we are enabling
1223	 * interrupts here. As far as it knows, they are already enabled
1224	 */
1225	ori	r10,r10,MSR_EE
1226	SYNC
1227	MTMSRD(r10)		/* hard-enable interrupts */
1228	bl	schedule
1229recheck:
1230	/* Note: And we don't tell it we are disabling them again
1231	 * neither. Those disable/enable cycles used to peek at
1232	 * TI_FLAGS aren't advertised.
1233	 */
1234	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1235	SYNC
1236	MTMSRD(r10)		/* disable interrupts */
1237	CURRENT_THREAD_INFO(r9, r1)
1238	lwz	r9,TI_FLAGS(r9)
1239	andi.	r0,r9,_TIF_NEED_RESCHED
1240	bne-	do_resched
1241	andi.	r0,r9,_TIF_USER_WORK_MASK
1242	beq	restore_user
1243do_user_signal:			/* r10 contains MSR_KERNEL here */
1244	ori	r10,r10,MSR_EE
1245	SYNC
1246	MTMSRD(r10)		/* hard-enable interrupts */
1247	/* save r13-r31 in the exception frame, if not already done */
1248	lwz	r3,_TRAP(r1)
1249	andi.	r0,r3,1
1250	beq	2f
1251	SAVE_NVGPRS(r1)
1252	rlwinm	r3,r3,0,0,30
1253	stw	r3,_TRAP(r1)
12542:	addi	r3,r1,STACK_FRAME_OVERHEAD
1255	mr	r4,r9
1256	bl	do_notify_resume
1257	REST_NVGPRS(r1)
1258	b	recheck
1259
1260/*
1261 * We come here when we are at the end of handling an exception
1262 * that occurred at a place where taking an exception will lose
1263 * state information, such as the contents of SRR0 and SRR1.
1264 */
1265nonrecoverable:
1266	lis	r10,exc_exit_restart_end@ha
1267	addi	r10,r10,exc_exit_restart_end@l
1268	cmplw	r12,r10
1269	bge	3f
1270	lis	r11,exc_exit_restart@ha
1271	addi	r11,r11,exc_exit_restart@l
1272	cmplw	r12,r11
1273	blt	3f
1274	lis	r10,ee_restarts@ha
1275	lwz	r12,ee_restarts@l(r10)
1276	addi	r12,r12,1
1277	stw	r12,ee_restarts@l(r10)
1278	mr	r12,r11		/* restart at exc_exit_restart */
1279	blr
12803:	/* OK, we can't recover, kill this process */
1281	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1282BEGIN_FTR_SECTION
1283	blr
1284END_FTR_SECTION_IFSET(CPU_FTR_601)
1285	lwz	r3,_TRAP(r1)
1286	andi.	r0,r3,1
1287	beq	4f
1288	SAVE_NVGPRS(r1)
1289	rlwinm	r3,r3,0,0,30
1290	stw	r3,_TRAP(r1)
12914:	addi	r3,r1,STACK_FRAME_OVERHEAD
1292	bl	nonrecoverable_exception
1293	/* shouldn't return */
1294	b	4b
1295
1296	.section .bss
1297	.align	2
1298ee_restarts:
1299	.space	4
1300	.previous
1301
1302/*
1303 * PROM code for specific machines follows.  Put it
1304 * here so it's easy to add arch-specific sections later.
1305 * -- Cort
1306 */
1307#ifdef CONFIG_PPC_RTAS
1308/*
1309 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1310 * called with the MMU off.
1311 */
1312_GLOBAL(enter_rtas)
1313	stwu	r1,-INT_FRAME_SIZE(r1)
1314	mflr	r0
1315	stw	r0,INT_FRAME_SIZE+4(r1)
1316	LOAD_REG_ADDR(r4, rtas)
1317	lis	r6,1f@ha	/* physical return address for rtas */
1318	addi	r6,r6,1f@l
1319	tophys(r6,r6)
1320	tophys(r7,r1)
1321	lwz	r8,RTASENTRY(r4)
1322	lwz	r4,RTASBASE(r4)
1323	mfmsr	r9
1324	stw	r9,8(r1)
1325	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1326	SYNC			/* disable interrupts so SRR0/1 */
1327	MTMSRD(r0)		/* don't get trashed */
1328	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1329	mtlr	r6
1330	mtspr	SPRN_SPRG_RTAS,r7
1331	mtspr	SPRN_SRR0,r8
1332	mtspr	SPRN_SRR1,r9
1333	RFI
13341:	tophys(r9,r1)
1335	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1336	lwz	r9,8(r9)	/* original msr value */
1337	FIX_SRR1(r9,r0)
1338	addi	r1,r1,INT_FRAME_SIZE
1339	li	r0,0
1340	mtspr	SPRN_SPRG_RTAS,r0
1341	mtspr	SPRN_SRR0,r8
1342	mtspr	SPRN_SRR1,r9
1343	RFI			/* return to caller */
1344
1345	.globl	machine_check_in_rtas
1346machine_check_in_rtas:
1347	twi	31,0,0
1348	/* XXX load up BATs and panic */
1349
1350#endif /* CONFIG_PPC_RTAS */
1351
1352#ifdef CONFIG_FUNCTION_TRACER
1353#ifdef CONFIG_DYNAMIC_FTRACE
1354_GLOBAL(mcount)
1355_GLOBAL(_mcount)
1356	/*
1357	 * It is required that _mcount on PPC32 must preserve the
1358	 * link register. But we have r0 to play with. We use r0
1359	 * to push the return address back to the caller of mcount
1360	 * into the ctr register, restore the link register and
1361	 * then jump back using the ctr register.
1362	 */
1363	mflr	r0
1364	mtctr	r0
1365	lwz	r0, 4(r1)
1366	mtlr	r0
1367	bctr
1368
1369_GLOBAL(ftrace_caller)
1370	MCOUNT_SAVE_FRAME
1371	/* r3 ends up with link register */
1372	subi	r3, r3, MCOUNT_INSN_SIZE
1373.globl ftrace_call
1374ftrace_call:
1375	bl	ftrace_stub
1376	nop
1377#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1378.globl ftrace_graph_call
1379ftrace_graph_call:
1380	b	ftrace_graph_stub
1381_GLOBAL(ftrace_graph_stub)
1382#endif
1383	MCOUNT_RESTORE_FRAME
1384	/* old link register ends up in ctr reg */
1385	bctr
1386#else
1387_GLOBAL(mcount)
1388_GLOBAL(_mcount)
1389
1390	MCOUNT_SAVE_FRAME
1391
1392	subi	r3, r3, MCOUNT_INSN_SIZE
1393	LOAD_REG_ADDR(r5, ftrace_trace_function)
1394	lwz	r5,0(r5)
1395
1396	mtctr	r5
1397	bctrl
1398	nop
1399
1400#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1401	b	ftrace_graph_caller
1402#endif
1403	MCOUNT_RESTORE_FRAME
1404	bctr
1405#endif
1406
1407_GLOBAL(ftrace_stub)
1408	blr
1409
1410#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1411_GLOBAL(ftrace_graph_caller)
1412	/* load r4 with local address */
1413	lwz	r4, 44(r1)
1414	subi	r4, r4, MCOUNT_INSN_SIZE
1415
1416	/* get the parent address */
1417	addi	r3, r1, 52
1418
1419	bl	prepare_ftrace_return
1420	nop
1421
1422	MCOUNT_RESTORE_FRAME
1423	/* old link register ends up in ctr reg */
1424	bctr
1425
1426_GLOBAL(return_to_handler)
1427	/* need to save return values */
1428	stwu	r1, -32(r1)
1429	stw	r3, 20(r1)
1430	stw	r4, 16(r1)
1431	stw	r31, 12(r1)
1432	mr	r31, r1
1433
1434	bl	ftrace_return_to_handler
1435	nop
1436
1437	/* return value has real return address */
1438	mtlr	r3
1439
1440	lwz	r3, 20(r1)
1441	lwz	r4, 16(r1)
1442	lwz	r31,12(r1)
1443	lwz	r1, 0(r1)
1444
1445	/* Jump back to real return address */
1446	blr
1447#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1448
1449#endif /* CONFIG_MCOUNT */
1450