/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
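/*
 * li can only load a sign-extended 16-bit immediate, so an MSR_KERNEL
 * value of 0x10000 or more has to be built with an lis/ori pair;
 * LOAD_MSR_KERNEL(r10,MSR_KERNEL) below then expands to
 *	lis r10,(MSR_KERNEL)@h; ori r10,r10,(MSR_KERNEL)@l
 */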

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack
	 * and set the limit to protect the thread_info
	 * struct
	 */
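	/*
	 * The rlwimi below keeps the low THREAD_SHIFT bits of the old
	 * ksp_limit and takes the upper bits from r1, roughly
	 *	ksp_limit = (r1 & ~(THREAD_SIZE - 1)) |
	 *		    (ksp_limit & (THREAD_SIZE - 1));
	 * so the limit tracks the stack we are actually running on.
	 */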
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack
	 * and set the limit to protect the thread_info
	 * struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
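/*
 * On arrival here (from the exception prologs in the head_*.S files):
 *	r11 points to the exception frame,
 *	r12 holds the interrupted NIP and r9 the interrupted MSR,
 *	r10 holds the MSR value the handler should run with,
 * and LR points at a pair of words giving the handler's virtual address
 * and the address to return to afterwards (read at transfer_to_handler_cont).
 */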
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If from user mode there is only one stack frame on the stack, and
	 * accessing CALLER_ADDR1 will cause an oops. So we need to create a
	 * dummy stack frame to make trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs,
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively,
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
	 * they aren't useful past this point (aren't syscall arguments),
	 * the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

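/*
 * PPC32 syscall calling convention: the syscall number arrives in r0
 * and the arguments in r3-r8.  The result is returned in r3; errors are
 * reported by negating the value and setting the summary-overflow (SO)
 * bit in CR0, which userspace tests to decide whether to set errno.
 */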
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
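	/*
	 * Handler return values in the range -MAX_ERRNO..-1 (i.e. unsigned
	 * values >= -MAX_ERRNO, which is what r8 holds) are errors: negate
	 * them to get the errno and set the SO bit in the saved CR0 so
	 * userspace sees the failure.
	 */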
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we
	 * want to catch the bugger if it does, right?
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl      trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
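/*
 * The low bit of the saved trap number (_TRAP) is used throughout this
 * file as a flag: while it is set, the non-volatile GPRs have not been
 * saved in the exception frame; clearing it (as below) records that
 * SAVE_NVGPRS has been done, and paths such as do_user_signal and
 * syscall_exit_work test it before deciding whether to save them again.
 */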
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
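/*
 * Seen from C (via switch_to()), this is roughly
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *new);
 * the return value is the task we switched away from ("last"), which is
 * why the old r2 (current) is copied into r3 below before r2 is updated
 * to the new task.
 */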
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r8,TI_FLAGS(r9)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds overkill
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
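/*
 * For example, RESTORE_xSRR(CSRR0,CSRR1) below expands to
 *	lwz	r9,_CSRR0(r1); lwz	r10,_CSRR1(r1);
 *	mtspr	SPRN_CSRR0,r9; mtspr	SPRN_CSRR1,r10;
 * i.e. it reloads an exception level's save/restore registers from the
 * values stashed in the frame by the *_transfer_to_handler code above.
 */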

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
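/*
 * global_dbcr0 (in the .bss below) holds two words per CPU: word 0 is
 * the saved global DBCR0 value and word 4 is a count that load_dbcr0
 * increments and transfer_to_handler decrements, which is why the SMP
 * case indexes it by cpu * 8 (slwi r9,r9,3).
 */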
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
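/*
 * r3 is expected to carry the physical address of the RTAS argument
 * block (the C caller passes __pa() of it); it is left untouched below
 * and handed straight to the RTAS entry point, which is reached via an
 * RFI with both instruction and data translation disabled.
 */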
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * It is required that _mcount on PPC32 must preserve the
	 * link register. But we have r0 to play with. We use r0
	 * to push the return address back to the caller of mcount
	 * into the ctr register, restore the link register and
	 * then jump back using the ctr register.
	 */
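	/*
	 * The profiling call sequence on PPC32 is typically
	 *	mflr	r0
	 *	stw	r0,4(r1)
	 *	bl	_mcount
	 * so on entry LR points back into the instrumented function and
	 * 4(r1) still holds that function's own return address, which is
	 * why it is reloaded into LR before the bctr below.
	 */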
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	lwz	r3,52(r1)

	bl	prepare_ftrace_return
	nop

        /*
         * prepare_ftrace_return gives us the address we divert to.
         * Change the LR in the caller's stack frame to this.
         */
	stw	r3,52(r1)

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */