/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif

#ifdef CONFIG_BOOKE
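/*
 * Book-E machine check, debug and critical interrupts each have their
 * own save/restore register pairs.  Each entry point below saves the
 * lower-level pairs (DSRR, CSRR, SRR) that could be clobbered while
 * the handler runs, so ret_from_*_exc can restore them on exit, and
 * then falls through to the common crit_transfer_to_handler code.
 */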
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* save the current stack limit, then point the limit
	 * into the current stack, keeping the offset that
	 * protects the thread_info struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* save the current stack limit, then point the limit
	 * into the current stack, keeping the offset that
	 * protects the thread_info struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If from user mode there is only one stack frame on the stack, and
	 * accessing CALLER_ADDR1 will cause an oops.  So we need to create a
	 * dummy stack frame to make trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively, and
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well.  r12, CCR, CTR, XER etc... are left clobbered as
	 * they aren't useful past this point (aren't syscall arguments);
	 * the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

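/*
 * On entry, r0 holds the system call number and r3-r8 hold the
 * arguments; the system call exception prolog has already saved
 * the registers into the exception frame on the kernel stack.
 */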
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here.  It shouldn't happen, but we want
	 * to catch the bugger if it does.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

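/*
 * Entry points used by copy_thread: a new user task resumes here at
 * ret_from_fork, while a kernel thread starts at ret_from_kernel_thread
 * with the function to call in r14 and its argument in r15.
 */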
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	do_exit		# no return

	.globl	__ret_from_kernel_execve
__ret_from_kernel_execve:
	addi	r1,r3,-STACK_FRAME_OVERHEAD
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

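/*
 * Slow path for system call exit: handle _TIF_RESTOREALL/_TIF_NOERROR,
 * clear the per-syscall TIF flags, and call do_syscall_trace_leave if
 * the syscall is being traced or single-stepped.
 */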
syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

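/*
 * Fast return from an exception that did not go through
 * transfer_to_handler: r11 points at the exception frame and r9/r12
 * hold the saved MSR/NIP.  Only the registers the exception prolog
 * saved are restored before the RFI.
 */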
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r8,TI_FLAGS(r9)
	andis.	r8,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know that IRQs are temporarily turned off in this
	 * assembly code while we peek at TI_FLAGS() and such. However, we do
	 * need to inform it if the exception turned interrupts off and we are
	 * about to turn them back on.
	 *
	 * The problem, sadly, is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep
	 * about turning them on here when we go back to wherever we came from
	 * with EE on, even if that means some redundant calls being tracked.
	 * Maybe later we could encode what the exception did somewhere or test
	 * the exception type in the pt_regs, but that sounds like overkill.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

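/*
 * Common return path for the critical / debug / machine check levels:
 * restore the full register set from the exception frame and return
 * with the exception level's own rfi variant, using the SRR pair that
 * belongs to that level.
 */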
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

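/* Restore an exception level's SRR pair from the values saved in the
 * exception frame by the corresponding *_transfer_to_handler code.
 */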
#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced, having
 * first saved away the global DBCR0.  Note that r0 holds the dbcr0
 * value to set on entry to this routine.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

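/*
 * Handle pending work before returning to user mode: reschedule if
 * _TIF_NEED_RESCHED is set, otherwise deliver signals/notifications.
 * Called with interrupts hard-disabled; r9 holds the thread_info flags
 * and r10 holds MSR_KERNEL.
 */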
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * neither. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
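/*
 * enter_rtas expects r3 to hold the address of the RTAS argument
 * block (as set up by the caller).  We save the MSR, switch to the
 * RTAS entry point with the MMU disabled, and return at 1: below.
 */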
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * _mcount on PPC32 is required to preserve the link register.
	 * We have r0 to play with, so use it to move our return
	 * address (back into mcount's caller) into the ctr register,
	 * restore the caller's link register from the stack, and then
	 * jump back through ctr.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

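/*
 * With dynamic ftrace, the calls to _mcount are patched to call
 * ftrace_caller instead; the "bl ftrace_stub" at ftrace_call below is
 * itself patched at runtime to call the active tracer.
 */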
_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
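/*
 * Replace the return address saved in the caller's stack frame with
 * return_to_handler so that the traced function returns through the
 * graph tracer; prepare_ftrace_return records the original address.
 */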
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	addi	r3, r1, 52

	bl	prepare_ftrace_return
	nop

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

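/*
 * The traced function "returns" here instead of to its real caller;
 * ftrace_return_to_handler hands back the original return address,
 * which we then jump to after restoring the return values.
 */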
_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */