1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34#include <asm/ftrace.h>
35#include <asm/ptrace.h>
36
37/*
38 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
39 */
40#if MSR_KERNEL >= 0x10000
41#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
42#else
43#define LOAD_MSR_KERNEL(r, x)	li r,(x)
44#endif
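/*
 * li only takes a signed 16-bit immediate, so an MSR value with bits set
 * above 0xffff (e.g. MSR_CE on Book-E) has to be built with the
 * two-instruction lis/ori sequence instead.
 */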
45
46#ifdef CONFIG_BOOKE
47	.globl	mcheck_transfer_to_handler
48mcheck_transfer_to_handler:
49	mfspr	r0,SPRN_DSRR0
50	stw	r0,_DSRR0(r11)
51	mfspr	r0,SPRN_DSRR1
52	stw	r0,_DSRR1(r11)
53	/* fall through */
54
55	.globl	debug_transfer_to_handler
56debug_transfer_to_handler:
57	mfspr	r0,SPRN_CSRR0
58	stw	r0,_CSRR0(r11)
59	mfspr	r0,SPRN_CSRR1
60	stw	r0,_CSRR1(r11)
61	/* fall through */
62
63	.globl	crit_transfer_to_handler
64crit_transfer_to_handler:
65#ifdef CONFIG_PPC_BOOK3E_MMU
66	mfspr	r0,SPRN_MAS0
67	stw	r0,MAS0(r11)
68	mfspr	r0,SPRN_MAS1
69	stw	r0,MAS1(r11)
70	mfspr	r0,SPRN_MAS2
71	stw	r0,MAS2(r11)
72	mfspr	r0,SPRN_MAS3
73	stw	r0,MAS3(r11)
74	mfspr	r0,SPRN_MAS6
75	stw	r0,MAS6(r11)
76#ifdef CONFIG_PHYS_64BIT
77	mfspr	r0,SPRN_MAS7
78	stw	r0,MAS7(r11)
79#endif /* CONFIG_PHYS_64BIT */
80#endif /* CONFIG_PPC_BOOK3E_MMU */
81#ifdef CONFIG_44x
82	mfspr	r0,SPRN_MMUCR
83	stw	r0,MMUCR(r11)
84#endif
85	mfspr	r0,SPRN_SRR0
86	stw	r0,_SRR0(r11)
87	mfspr	r0,SPRN_SRR1
88	stw	r0,_SRR1(r11)
89
90	/* set the stack limit to the current stack, keeping the
91	 * low-order offset so that the limit still protects the
92	 * thread_info struct
93	 */
94	mfspr	r8,SPRN_SPRG_THREAD
95	lwz	r0,KSP_LIMIT(r8)
96	stw	r0,SAVED_KSP_LIMIT(r11)
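	/* the rlwimi below keeps the limit's low-order offset (within the
	 * THREAD_SIZE region) but replaces the upper bits with those of r1,
	 * moving the limit onto the current stack */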
97	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
98	stw	r0,KSP_LIMIT(r8)
99	/* fall through */
100#endif
101
102#ifdef CONFIG_40x
103	.globl	crit_transfer_to_handler
104crit_transfer_to_handler:
105	lwz	r0,crit_r10@l(0)
106	stw	r0,GPR10(r11)
107	lwz	r0,crit_r11@l(0)
108	stw	r0,GPR11(r11)
109	mfspr	r0,SPRN_SRR0
110	stw	r0,crit_srr0@l(0)
111	mfspr	r0,SPRN_SRR1
112	stw	r0,crit_srr1@l(0)
113
114	/* set the stack limit to the current stack, keeping the
115	 * low-order offset so that the limit still protects the
116	 * thread_info struct
117	 */
118	mfspr	r8,SPRN_SPRG_THREAD
119	lwz	r0,KSP_LIMIT(r8)
120	stw	r0,saved_ksp_limit@l(0)
121	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
122	stw	r0,KSP_LIMIT(r8)
123	/* fall through */
124#endif
125
126/*
127 * This code finishes saving the registers to the exception frame
128 * and jumps to the appropriate handler for the exception, turning
129 * on address translation.
130 * Note that we rely on the caller having set cr0.eq iff the exception
131 * occurred in kernel mode (i.e. MSR:PR = 0).
132 */
133	.globl	transfer_to_handler_full
134transfer_to_handler_full:
135	SAVE_NVGPRS(r11)
136	/* fall through */
137
138	.globl	transfer_to_handler
139transfer_to_handler:
140	stw	r2,GPR2(r11)
141	stw	r12,_NIP(r11)
142	stw	r9,_MSR(r11)
143	andi.	r2,r9,MSR_PR
144	mfctr	r12
145	mfspr	r2,SPRN_XER
146	stw	r12,_CTR(r11)
147	stw	r2,_XER(r11)
148	mfspr	r12,SPRN_SPRG_THREAD
149	addi	r2,r12,-THREAD
150	tovirt(r2,r2)			/* set r2 to current */
151	beq	2f			/* if from user, fix up THREAD.regs */
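	/* from user: point thread.regs at this exception frame */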
152	addi	r11,r1,STACK_FRAME_OVERHEAD
153	stw	r11,PT_REGS(r12)
154#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
155	/* Check to see if the dbcr0 register is set up to debug.  Use the
156	   internal debug mode bit to do this. */
157	lwz	r12,THREAD_DBCR0(r12)
158	andis.	r12,r12,DBCR0_IDM@h
159	beq+	3f
160	/* From user and task is ptraced - load up global dbcr0 */
161	li	r12,-1			/* clear all pending debug events */
162	mtspr	SPRN_DBSR,r12
163	lis	r11,global_dbcr0@ha
164	tophys(r11,r11)
165	addi	r11,r11,global_dbcr0@l
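	/* global_dbcr0 has an 8-byte slot per CPU (saved DBCR0 plus a
	 * counter), hence the cpu * 8 indexing on SMP */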
166#ifdef CONFIG_SMP
167	CURRENT_THREAD_INFO(r9, r1)
168	lwz	r9,TI_CPU(r9)
169	slwi	r9,r9,3
170	add	r11,r11,r9
171#endif
172	lwz	r12,0(r11)
173	mtspr	SPRN_DBCR0,r12
174	lwz	r12,4(r11)
175	addi	r12,r12,-1
176	stw	r12,4(r11)
177#endif
178#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
179	CURRENT_THREAD_INFO(r9, r1)
180	tophys(r9, r9)
181	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
182#endif
183
184	b	3f
185
1862:	/* if from kernel, check interrupted DOZE/NAP mode and
187         * check for stack overflow
188         */
189	lwz	r9,KSP_LIMIT(r12)
190	cmplw	r1,r9			/* if r1 <= ksp_limit */
191	ble-	stack_ovf		/* then the kernel stack overflowed */
1925:
193#if defined(CONFIG_6xx) || defined(CONFIG_E500)
194	CURRENT_THREAD_INFO(r9, r1)
195	tophys(r9,r9)			/* check local flags */
196	lwz	r12,TI_LOCAL_FLAGS(r9)
197	mtcrf	0x01,r12
198	bt-	31-TLF_NAPPING,4f
199	bt-	31-TLF_SLEEPING,7f
200#endif /* CONFIG_6xx || CONFIG_E500 */
201	.globl transfer_to_handler_cont
202transfer_to_handler_cont:
2033:
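	/* The caller's LR points at a pair of words: the handler's virtual
	 * address and the address to continue at when the handler returns
	 * (see e.g. the two .long words after the bl transfer_to_handler_full
	 * in the nonrecoverable-interrupt path below). */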
204	mflr	r9
205	lwz	r11,0(r9)		/* virtual address of handler */
206	lwz	r9,4(r9)		/* where to go when done */
207#ifdef CONFIG_TRACE_IRQFLAGS
208	lis	r12,reenable_mmu@h
209	ori	r12,r12,reenable_mmu@l
210	mtspr	SPRN_SRR0,r12
211	mtspr	SPRN_SRR1,r10
212	SYNC
213	RFI
214reenable_mmu:				/* re-enable mmu so we can */
215	mfmsr	r10
216	lwz	r12,_MSR(r1)
217	xor	r10,r10,r12
218	andi.	r10,r10,MSR_EE		/* Did EE change? */
219	beq	1f
220
221	/*
222	 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
223	 * If we came from user mode there is only one stack frame on the
224	 * stack, and accessing CALLER_ADDR1 will cause an oops, so we need
225	 * to create a dummy stack frame to keep trace_hardirqs_off happy.
226	 *
227	 * This is handy because we also need to save a bunch of GPRs:
228	 * r3 can be different from GPR3(r1) at this point, r9 and r11
229	 * contain the old MSR and the handler address respectively, and
230	 * r4 & r5 can contain page fault arguments that need to be
231	 * passed along as well. r12, CCR, CTR, XER etc... are left
232	 * clobbered as they aren't useful past this point (they aren't
233	 * syscall arguments); the rest is restored from the exception frame.
234	 */
235	stwu	r1,-32(r1)
236	stw	r9,8(r1)
237	stw	r11,12(r1)
238	stw	r3,16(r1)
239	stw	r4,20(r1)
240	stw	r5,24(r1)
241	bl	trace_hardirqs_off
242	lwz	r5,24(r1)
243	lwz	r4,20(r1)
244	lwz	r3,16(r1)
245	lwz	r11,12(r1)
246	lwz	r9,8(r1)
247	addi	r1,r1,32
248	lwz	r0,GPR0(r1)
249	lwz	r6,GPR6(r1)
250	lwz	r7,GPR7(r1)
251	lwz	r8,GPR8(r1)
2521:	mtctr	r11
253	mtlr	r9
254	bctr				/* jump to handler */
255#else /* CONFIG_TRACE_IRQFLAGS */
256	mtspr	SPRN_SRR0,r11
257	mtspr	SPRN_SRR1,r10
258	mtlr	r9
259	SYNC
260	RFI				/* jump to handler, enable MMU */
261#endif /* CONFIG_TRACE_IRQFLAGS */
262
263#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2644:	rlwinm	r12,r12,0,~_TLF_NAPPING
265	stw	r12,TI_LOCAL_FLAGS(r9)
266	b	power_save_ppc32_restore
267
2687:	rlwinm	r12,r12,0,~_TLF_SLEEPING
269	stw	r12,TI_LOCAL_FLAGS(r9)
270	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
271	rlwinm	r9,r9,0,~MSR_EE
272	lwz	r12,_LINK(r11)		/* and return to address in LR */
273	b	fast_exception_return
274#endif
275
276/*
277 * On kernel stack overflow, load up an initial stack pointer
278 * and call StackOverflow(regs), which should not return.
279 */
280stack_ovf:
281	/* sometimes we use a statically-allocated stack, which is OK. */
282	lis	r12,_end@h
283	ori	r12,r12,_end@l
284	cmplw	r1,r12
285	ble	5b			/* r1 <= &_end is OK */
286	SAVE_NVGPRS(r11)
287	addi	r3,r1,STACK_FRAME_OVERHEAD
288	lis	r1,init_thread_union@ha
289	addi	r1,r1,init_thread_union@l
290	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
291	lis	r9,StackOverflow@ha
292	addi	r9,r9,StackOverflow@l
293	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
294	FIX_SRR1(r10,r12)
295	mtspr	SPRN_SRR0,r9
296	mtspr	SPRN_SRR1,r10
297	SYNC
298	RFI
299
300/*
301 * Handle a system call.
302 */
303	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
304	.stabs	"entry_32.S",N_SO,0,0,0f
3050:
306
307_GLOBAL(DoSyscall)
308	stw	r3,ORIG_GPR3(r1)
309	li	r12,0
310	stw	r12,RESULT(r1)
311	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
312	rlwinm	r11,r11,0,4,2
313	stw	r11,_CCR(r1)
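	/* the rlwinm above uses a wrap-around mask (MB=4, ME=2), clearing
	 * only bit 3 of the saved CR image, i.e. CR0[SO] */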
314#ifdef CONFIG_TRACE_IRQFLAGS
315	/* Returning from a syscall can (and generally will) hard-enable
316	 * interrupts. You aren't supposed to enter a syscall with
317	 * interrupts disabled in the first place. However, to make sure
318	 * we get it right vs. lockdep if it does happen, we force that
319	 * hard enable here, with the appropriate tracing, if we see
320	 * that we were called with interrupts off.
321	 */
322	mfmsr	r11
323	andi.	r12,r11,MSR_EE
324	bne+	1f
325	/* We came in with interrupts disabled, we enable them now */
326	bl	trace_hardirqs_on
327	mfmsr	r11
328	lwz	r0,GPR0(r1)
329	lwz	r3,GPR3(r1)
330	lwz	r4,GPR4(r1)
331	ori	r11,r11,MSR_EE
332	lwz	r5,GPR5(r1)
333	lwz	r6,GPR6(r1)
334	lwz	r7,GPR7(r1)
335	lwz	r8,GPR8(r1)
336	mtmsr	r11
3371:
338#endif /* CONFIG_TRACE_IRQFLAGS */
339	CURRENT_THREAD_INFO(r10, r1)
340	lwz	r11,TI_FLAGS(r10)
341	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
342	bne-	syscall_dotrace
343syscall_dotrace_cont:
344	cmplwi	0,r0,NR_syscalls
345	lis	r10,sys_call_table@h
346	ori	r10,r10,sys_call_table@l
347	slwi	r0,r0,2
348	bge-	66f
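	/* r0 now holds the syscall number * 4, i.e. the byte offset into
	 * sys_call_table; out-of-range numbers go to the -ENOSYS return
	 * at label 66 below */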
349	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
350	mtlr	r10
351	addi	r9,r1,STACK_FRAME_OVERHEAD
352	PPC440EP_ERR42
353	blrl			/* Call handler */
354	.globl	ret_from_syscall
355ret_from_syscall:
356	mr	r6,r3
357	CURRENT_THREAD_INFO(r12, r1)
358	/* disable interrupts so current_thread_info()->flags can't change */
359	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
360	/* Note: We don't bother telling lockdep about it */
361	SYNC
362	MTMSRD(r10)
363	lwz	r9,TI_FLAGS(r12)
364	li	r8,-MAX_ERRNO
365	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
366	bne-	syscall_exit_work
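	/* return values in [-MAX_ERRNO, -1] compare unsigned >= r8
	 * (-MAX_ERRNO): those are errors, so negate the value and set
	 * the SO bit in the saved CR */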
367	cmplw	0,r3,r8
368	blt+	syscall_exit_cont
369	lwz	r11,_CCR(r1)			/* Load CR */
370	neg	r3,r3
371	oris	r11,r11,0x1000	/* Set SO bit in CR */
372	stw	r11,_CCR(r1)
373syscall_exit_cont:
374	lwz	r8,_MSR(r1)
375#ifdef CONFIG_TRACE_IRQFLAGS
376	/* If we are going to return from the syscall with interrupts
377	 * off, we trace that here. It shouldn't happen, but we want
378	 * to catch the bugger if it does, right?
379	 */
380	andi.	r10,r8,MSR_EE
381	bne+	1f
382	stw	r3,GPR3(r1)
383	bl      trace_hardirqs_off
384	lwz	r3,GPR3(r1)
3851:
386#endif /* CONFIG_TRACE_IRQFLAGS */
387#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
388	/* If the process has its own DBCR0 value, load it up.  The internal
389	   debug mode bit tells us that dbcr0 should be loaded. */
390	lwz	r0,THREAD+THREAD_DBCR0(r2)
391	andis.	r10,r0,DBCR0_IDM@h
392	bnel-	load_dbcr0
393#endif
394#ifdef CONFIG_44x
395BEGIN_MMU_FTR_SECTION
396	lis	r4,icache_44x_need_flush@ha
397	lwz	r5,icache_44x_need_flush@l(r4)
398	cmplwi	cr0,r5,0
399	bne-	2f
4001:
401END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
402#endif /* CONFIG_44x */
403BEGIN_FTR_SECTION
404	lwarx	r7,0,r1
405END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
406	stwcx.	r0,0,r1			/* to clear the reservation */
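	/* clearing the reservation here stops a stwcx. executed after we
	 * return from falsely succeeding on a reservation left over from
	 * before the exception; CPUs with CPU_FTR_NEED_PAIRED_STWCX
	 * apparently want the stwcx. paired with a lwarx, hence the
	 * feature section above */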
407#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
408	andi.	r4,r8,MSR_PR
409	beq	3f
410	CURRENT_THREAD_INFO(r4, r1)
411	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
4123:
413#endif
414	lwz	r4,_LINK(r1)
415	lwz	r5,_CCR(r1)
416	mtlr	r4
417	mtcr	r5
418	lwz	r7,_NIP(r1)
419	FIX_SRR1(r8, r0)
420	lwz	r2,GPR2(r1)
421	lwz	r1,GPR1(r1)
422	mtspr	SPRN_SRR0,r7
423	mtspr	SPRN_SRR1,r8
424	SYNC
425	RFI
426#ifdef CONFIG_44x
4272:	li	r7,0
428	iccci	r0,r0
429	stw	r7,icache_44x_need_flush@l(r4)
430	b	1b
431#endif  /* CONFIG_44x */
432
43366:	li	r3,-ENOSYS
434	b	ret_from_syscall
435
436	.globl	ret_from_fork
437ret_from_fork:
438	REST_NVGPRS(r1)
439	bl	schedule_tail
440	li	r3,0
441	b	ret_from_syscall
442
443	.globl	ret_from_kernel_thread
444ret_from_kernel_thread:
445	REST_NVGPRS(r1)
446	bl	schedule_tail
447	mtlr	r14
448	mr	r3,r15
449	PPC440EP_ERR42
450	blrl
451	li	r3,0
452	b	ret_from_syscall
453
454/* Traced system call support */
455syscall_dotrace:
456	SAVE_NVGPRS(r1)
457	li	r0,0xc00
458	stw	r0,_TRAP(r1)
459	addi	r3,r1,STACK_FRAME_OVERHEAD
460	bl	do_syscall_trace_enter
461	/*
462	 * Restore the argument registers, which may just have been changed.
463	 * We use the return value of do_syscall_trace_enter as the syscall
464	 * number to look up in the table (in r0).
465	 */
466	mr	r0,r3
467	lwz	r3,GPR3(r1)
468	lwz	r4,GPR4(r1)
469	lwz	r5,GPR5(r1)
470	lwz	r6,GPR6(r1)
471	lwz	r7,GPR7(r1)
472	lwz	r8,GPR8(r1)
473	REST_NVGPRS(r1)
474
475	cmplwi	r0,NR_syscalls
476	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
477	bge-	ret_from_syscall
478	b	syscall_dotrace_cont
479
480syscall_exit_work:
481	andi.	r0,r9,_TIF_RESTOREALL
482	beq+	0f
483	REST_NVGPRS(r1)
484	b	2f
4850:	cmplw	0,r3,r8
486	blt+	1f
487	andi.	r0,r9,_TIF_NOERROR
488	bne-	1f
489	lwz	r11,_CCR(r1)			/* Load CR */
490	neg	r3,r3
491	oris	r11,r11,0x1000	/* Set SO bit in CR */
492	stw	r11,_CCR(r1)
493
4941:	stw	r6,RESULT(r1)	/* Save result */
495	stw	r3,GPR3(r1)	/* Update return value */
4962:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
497	beq	4f
498
499	/* Clear per-syscall TIF flags if any are set.  */
500
501	li	r11,_TIF_PERSYSCALL_MASK
502	addi	r12,r12,TI_FLAGS
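	/* clear the per-syscall bits atomically with a lwarx/stwcx. loop;
	 * the dcbt below is there for the IBM405_ERR77 workaround */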
5033:	lwarx	r8,0,r12
504	andc	r8,r8,r11
505#ifdef CONFIG_IBM405_ERR77
506	dcbt	0,r12
507#endif
508	stwcx.	r8,0,r12
509	bne-	3b
510	subi	r12,r12,TI_FLAGS
511
5124:	/* Anything which requires enabling interrupts? */
513	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
514	beq	ret_from_except
515
516	/* Re-enable interrupts. There is no need to trace that with
517	 * lockdep as we are supposed to have IRQs on at this point
518	 */
519	ori	r10,r10,MSR_EE
520	SYNC
521	MTMSRD(r10)
522
523	/* Save NVGPRS if they're not saved already */
524	lwz	r4,_TRAP(r1)
525	andi.	r4,r4,1
526	beq	5f
527	SAVE_NVGPRS(r1)
528	li	r4,0xc00
529	stw	r4,_TRAP(r1)
5305:
531	addi	r3,r1,STACK_FRAME_OVERHEAD
532	bl	do_syscall_trace_leave
533	b	ret_from_except_full
534
535/*
536 * The fork/clone functions need to copy the full register set into
537 * the child process. Therefore we need to save all the nonvolatile
538 * registers (r13 - r31) before calling the C code.
539 */
540	.globl	ppc_fork
541ppc_fork:
542	SAVE_NVGPRS(r1)
543	lwz	r0,_TRAP(r1)
544	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
545	stw	r0,_TRAP(r1)		/* register set saved */
546	b	sys_fork
547
548	.globl	ppc_vfork
549ppc_vfork:
550	SAVE_NVGPRS(r1)
551	lwz	r0,_TRAP(r1)
552	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
553	stw	r0,_TRAP(r1)		/* register set saved */
554	b	sys_vfork
555
556	.globl	ppc_clone
557ppc_clone:
558	SAVE_NVGPRS(r1)
559	lwz	r0,_TRAP(r1)
560	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
561	stw	r0,_TRAP(r1)		/* register set saved */
562	b	sys_clone
563
564	.globl	ppc_swapcontext
565ppc_swapcontext:
566	SAVE_NVGPRS(r1)
567	lwz	r0,_TRAP(r1)
568	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
569	stw	r0,_TRAP(r1)		/* register set saved */
570	b	sys_swapcontext
571
572/*
573 * Top-level page fault handling.
574 * This is in assembler because if do_page_fault tells us that
575 * it is a bad kernel page fault, we want to save the non-volatile
576 * registers before calling bad_page_fault.
577 */
578	.globl	handle_page_fault
579handle_page_fault:
580	stw	r4,_DAR(r1)
581	addi	r3,r1,STACK_FRAME_OVERHEAD
582	bl	do_page_fault
583	cmpwi	r3,0
584	beq+	ret_from_except
585	SAVE_NVGPRS(r1)
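	/* clear the low bit of the trap value to record that the full
	 * register set is now saved (same convention as ppc_fork above) */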
586	lwz	r0,_TRAP(r1)
587	clrrwi	r0,r0,1
588	stw	r0,_TRAP(r1)
589	mr	r5,r3
590	addi	r3,r1,STACK_FRAME_OVERHEAD
591	lwz	r4,_DAR(r1)
592	bl	bad_page_fault
593	b	ret_from_except_full
594
595/*
596 * This routine switches between two different tasks.  The process
597 * state of one is saved on its kernel stack.  Then the state
598 * of the other is restored from its kernel stack.  The memory
599 * management hardware is updated to the second process's state.
600 * Finally, we can return to the second process.
601 * On entry, r3 points to the THREAD for the current task, r4
602 * points to the THREAD for the new task.
603 *
604 * This routine is always called with interrupts disabled.
605 *
606 * Note: there are two ways to get to the "going out" portion
607 * of this code; either by coming in via the entry (_switch)
608 * or via "fork" which must set up an environment equivalent
609 * to the "_switch" path.  If you change this, you'll have to
610 * change the fork code also.
611 *
612 * The code which creates the new task context is in 'copy_thread'
613 * in arch/ppc/kernel/process.c
614 */
615_GLOBAL(_switch)
616	stwu	r1,-INT_FRAME_SIZE(r1)
617	mflr	r0
618	stw	r0,INT_FRAME_SIZE+4(r1)
619	/* r3-r12 are caller saved -- Cort */
620	SAVE_NVGPRS(r1)
621	stw	r0,_NIP(r1)	/* Return to switch caller */
622	mfmsr	r11
623	li	r0,MSR_FP	/* Disable floating-point */
624#ifdef CONFIG_ALTIVEC
625BEGIN_FTR_SECTION
626	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
627	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
628	stw	r12,THREAD+THREAD_VRSAVE(r2)
629END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
630#endif /* CONFIG_ALTIVEC */
631#ifdef CONFIG_SPE
632BEGIN_FTR_SECTION
633	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
634	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
635	stw	r12,THREAD+THREAD_SPEFSCR(r2)
636END_FTR_SECTION_IFSET(CPU_FTR_SPE)
637#endif /* CONFIG_SPE */
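	/* clearing MSR_FP/MSR_VEC/MSR_SPE here means the first FP, AltiVec
	 * or SPE use after the switch will trap, so the register state can
	 * be reloaded lazily on first use */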
638	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
639	beq+	1f
640	andc	r11,r11,r0
641	MTMSRD(r11)
642	isync
6431:	stw	r11,_MSR(r1)
644	mfcr	r10
645	stw	r10,_CCR(r1)
646	stw	r1,KSP(r3)	/* Set old stack pointer */
647
648#ifdef CONFIG_SMP
649	/* We need a sync somewhere here to make sure that if the
650	 * previous task gets rescheduled on another CPU, it sees all
651	 * stores it has performed on this one.
652	 */
653	sync
654#endif /* CONFIG_SMP */
655
656	tophys(r0,r4)
657	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
658	lwz	r1,KSP(r4)	/* Load new stack pointer */
659
660	/* save the old current 'last' for return value */
661	mr	r3,r2
662	addi	r2,r4,-THREAD	/* Update current */
663
664#ifdef CONFIG_ALTIVEC
665BEGIN_FTR_SECTION
666	lwz	r0,THREAD+THREAD_VRSAVE(r2)
667	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
668END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
669#endif /* CONFIG_ALTIVEC */
670#ifdef CONFIG_SPE
671BEGIN_FTR_SECTION
672	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
673	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
674END_FTR_SECTION_IFSET(CPU_FTR_SPE)
675#endif /* CONFIG_SPE */
676
677	lwz	r0,_CCR(r1)
678	mtcrf	0xFF,r0
679	/* r3-r12 are destroyed -- Cort */
680	REST_NVGPRS(r1)
681
682	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
683	mtlr	r4
684	addi	r1,r1,INT_FRAME_SIZE
685	blr
686
687	.globl	fast_exception_return
688fast_exception_return:
689#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
690	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
691	beq	1f			/* if not, we've got problems */
692#endif
693
6942:	REST_4GPRS(3, r11)
695	lwz	r10,_CCR(r11)
696	REST_GPR(1, r11)
697	mtcr	r10
698	lwz	r10,_LINK(r11)
699	mtlr	r10
700	REST_GPR(10, r11)
701	mtspr	SPRN_SRR1,r9
702	mtspr	SPRN_SRR0,r12
703	REST_GPR(9, r11)
704	REST_GPR(12, r11)
705	lwz	r11,GPR11(r11)
706	SYNC
707	RFI
708
709#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
710/* check if the exception happened in a restartable section */
7111:	lis	r3,exc_exit_restart_end@ha
712	addi	r3,r3,exc_exit_restart_end@l
713	cmplw	r12,r3
714	bge	3f
715	lis	r4,exc_exit_restart@ha
716	addi	r4,r4,exc_exit_restart@l
717	cmplw	r12,r4
718	blt	3f
719	lis	r3,fee_restarts@ha
720	tophys(r3,r3)
721	lwz	r5,fee_restarts@l(r3)
722	addi	r5,r5,1
723	stw	r5,fee_restarts@l(r3)
724	mr	r12,r4		/* restart at exc_exit_restart */
725	b	2b
726
727	.section .bss
728	.align	2
729fee_restarts:
730	.space	4
731	.previous
732
733/* aargh, a nonrecoverable interrupt, panic */
734/* aargh, we don't know which trap this is */
735/* but the 601 doesn't implement the RI bit, so assume it's OK */
7363:
737BEGIN_FTR_SECTION
738	b	2b
739END_FTR_SECTION_IFSET(CPU_FTR_601)
740	li	r10,-1
741	stw	r10,_TRAP(r11)
742	addi	r3,r1,STACK_FRAME_OVERHEAD
743	lis	r10,MSR_KERNEL@h
744	ori	r10,r10,MSR_KERNEL@l
745	bl	transfer_to_handler_full
746	.long	nonrecoverable_exception
747	.long	ret_from_except
748#endif
749
750	.globl	ret_from_except_full
751ret_from_except_full:
752	REST_NVGPRS(r1)
753	/* fall through */
754
755	.globl	ret_from_except
756ret_from_except:
757	/* Hard-disable interrupts so that current_thread_info()->flags
758	 * can't change between when we test it and when we return
759	 * from the interrupt. */
760	/* Note: We don't bother telling lockdep about it */
761	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
762	SYNC			/* Some chip revs have problems here... */
763	MTMSRD(r10)		/* disable interrupts */
764
765	lwz	r3,_MSR(r1)	/* Returning to user mode? */
766	andi.	r0,r3,MSR_PR
767	beq	resume_kernel
768
769user_exc_return:		/* r10 contains MSR_KERNEL here */
770	/* Check current_thread_info()->flags */
771	CURRENT_THREAD_INFO(r9, r1)
772	lwz	r9,TI_FLAGS(r9)
773	andi.	r0,r9,_TIF_USER_WORK_MASK
774	bne	do_work
775
776restore_user:
777#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
778	/* Check whether this process has its own DBCR0 value.  The internal
779	   debug mode bit tells us that dbcr0 should be loaded. */
780	lwz	r0,THREAD+THREAD_DBCR0(r2)
781	andis.	r10,r0,DBCR0_IDM@h
782	bnel-	load_dbcr0
783#endif
784#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
785	CURRENT_THREAD_INFO(r9, r1)
786	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
787#endif
788
789	b	restore
790
791/* N.B. the only way to get here is from the beq following ret_from_except. */
792resume_kernel:
793	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
794	CURRENT_THREAD_INFO(r9, r1)
795	lwz	r8,TI_FLAGS(r9)
796	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
797	beq+	1f
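	/* an emulated stwu to the kernel stack is pending: rebuild this
	 * exception frame at the new stack location and then do the store
	 * itself to complete the stwu (see the comments below) */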
798
799	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
800
801	lwz	r3,GPR1(r1)
802	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
803	mr	r4,r1			/* src:  current exception frame */
804	mr	r1,r3			/* Reroute the trampoline frame to r1 */
805
806	/* Copy from the original to the trampoline. */
807	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
808	li	r6,0			/* start offset: 0 */
809	mtctr	r5
8102:	lwzx	r0,r6,r4
811	stwx	r0,r6,r3
812	addi	r6,r6,4
813	bdnz	2b
814
815	/* Do real store operation to complete stwu */
816	lwz	r5,GPR1(r1)
817	stw	r8,0(r5)
818
819	/* Clear _TIF_EMULATE_STACK_STORE flag */
820	lis	r11,_TIF_EMULATE_STACK_STORE@h
821	addi	r5,r9,TI_FLAGS
8220:	lwarx	r8,0,r5
823	andc	r8,r8,r11
824#ifdef CONFIG_IBM405_ERR77
825	dcbt	0,r5
826#endif
827	stwcx.	r8,0,r5
828	bne-	0b
8291:
830
831#ifdef CONFIG_PREEMPT
832	/* check current_thread_info->preempt_count */
833	lwz	r0,TI_PREEMPT(r9)
834	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
835	bne	restore
836	andi.	r8,r8,_TIF_NEED_RESCHED
837	beq+	restore
838	lwz	r3,_MSR(r1)
839	andi.	r0,r3,MSR_EE	/* interrupts off? */
840	beq	restore		/* don't schedule if so */
841#ifdef CONFIG_TRACE_IRQFLAGS
842	/* Lockdep thinks irqs are enabled, but we need to call
843	 * preempt_schedule_irq with IRQs off, so we inform lockdep
844	 * that we -did- already turn them off.
845	 */
846	bl	trace_hardirqs_off
847#endif
8481:	bl	preempt_schedule_irq
849	CURRENT_THREAD_INFO(r9, r1)
850	lwz	r3,TI_FLAGS(r9)
851	andi.	r0,r3,_TIF_NEED_RESCHED
852	bne-	1b
853#ifdef CONFIG_TRACE_IRQFLAGS
854	/* And now, to properly rebalance the above, we tell lockdep they
855	 * are being turned back on, which will happen when we return
856	 */
857	bl	trace_hardirqs_on
858#endif
859#endif /* CONFIG_PREEMPT */
860
861	/* interrupts are hard-disabled at this point */
862restore:
863#ifdef CONFIG_44x
864BEGIN_MMU_FTR_SECTION
865	b	1f
866END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
867	lis	r4,icache_44x_need_flush@ha
868	lwz	r5,icache_44x_need_flush@l(r4)
869	cmplwi	cr0,r5,0
870	beq+	1f
871	li	r6,0
872	iccci	r0,r0
873	stw	r6,icache_44x_need_flush@l(r4)
8741:
875#endif  /* CONFIG_44x */
876
877	lwz	r9,_MSR(r1)
878#ifdef CONFIG_TRACE_IRQFLAGS
879	/* Lockdep doesn't know that IRQs are temporarily turned off in this
880	 * assembly code while we peek at TI_FLAGS() and such. However, we
881	 * need to inform it if the exception turned interrupts off and we
882	 * are about to turn them back on.
883	 *
884	 * The problem, sadly, is that we don't know whether the exception was
885	 * one that turned interrupts off or not. So we always tell lockdep
886	 * that they are being turned on here when we go back to wherever we
887	 * came from with EE on, even if that means some redundant calls get
888	 * tracked. Maybe later we could encode what the exception did somewhere,
889	 * or test the exception type in pt_regs, but that sounds like overkill.
890	 */
891	andi.	r10,r9,MSR_EE
892	beq	1f
893	/*
894	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
895	 * which is the stack frame here, we need to force a stack frame
896	 * in case we came from user space.
897	 */
898	stwu	r1,-32(r1)
899	mflr	r0
900	stw	r0,4(r1)
901	stwu	r1,-32(r1)
902	bl	trace_hardirqs_on
903	lwz	r1,0(r1)
904	lwz	r1,0(r1)
905	lwz	r9,_MSR(r1)
9061:
907#endif /* CONFIG_TRACE_IRQFLAGS */
908
909	lwz	r0,GPR0(r1)
910	lwz	r2,GPR2(r1)
911	REST_4GPRS(3, r1)
912	REST_2GPRS(7, r1)
913
914	lwz	r10,_XER(r1)
915	lwz	r11,_CTR(r1)
916	mtspr	SPRN_XER,r10
917	mtctr	r11
918
919	PPC405_ERR77(0,r1)
920BEGIN_FTR_SECTION
921	lwarx	r11,0,r1
922END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
923	stwcx.	r0,0,r1			/* to clear the reservation */
924
925#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
926	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
927	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
928
929	lwz	r10,_CCR(r1)
930	lwz	r11,_LINK(r1)
931	mtcrf	0xFF,r10
932	mtlr	r11
933
934	/*
935	 * Once we put values in SRR0 and SRR1, we are in a state
936	 * where exceptions are not recoverable, since taking an
937	 * exception will trash SRR0 and SRR1.  Therefore we clear the
938	 * MSR:RI bit to indicate this.  If we do take an exception,
939	 * we can't return to the point of the exception but we
940	 * can restart the exception exit path at the label
941	 * exc_exit_restart below.  -- paulus
942	 */
943	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
944	SYNC
945	MTMSRD(r10)		/* clear the RI bit */
946	.globl exc_exit_restart
947exc_exit_restart:
948	lwz	r12,_NIP(r1)
949	FIX_SRR1(r9,r10)
950	mtspr	SPRN_SRR0,r12
951	mtspr	SPRN_SRR1,r9
952	REST_4GPRS(9, r1)
953	lwz	r1,GPR1(r1)
954	.globl exc_exit_restart_end
955exc_exit_restart_end:
956	SYNC
957	RFI
958
959#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
960	/*
961	 * This is a bit different on 4xx/Book-E because it doesn't have
962	 * the RI bit in the MSR.
963	 * The TLB miss handler checks if we have interrupted
964	 * the exception exit path and restarts it if so
965	 * (well maybe one day it will... :).
966	 */
967	lwz	r11,_LINK(r1)
968	mtlr	r11
969	lwz	r10,_CCR(r1)
970	mtcrf	0xff,r10
971	REST_2GPRS(9, r1)
972	.globl exc_exit_restart
973exc_exit_restart:
974	lwz	r11,_NIP(r1)
975	lwz	r12,_MSR(r1)
976exc_exit_start:
977	mtspr	SPRN_SRR0,r11
978	mtspr	SPRN_SRR1,r12
979	REST_2GPRS(11, r1)
980	lwz	r1,GPR1(r1)
981	.globl exc_exit_restart_end
982exc_exit_restart_end:
983	PPC405_ERR77_SYNC
984	rfi
985	b	.			/* prevent prefetch past rfi */
986
987/*
988 * Returning from a critical interrupt in user mode doesn't need
989 * to be any different from a normal exception.  For a critical
990 * interrupt in the kernel, we just return (without checking for
991 * preemption) since the interrupt may have happened at some crucial
992 * place (e.g. inside the TLB miss handler), and because we will be
993 * running with r1 pointing into critical_stack, not the current
994 * process's kernel stack (and therefore current_thread_info() will
995 * give the wrong answer).
996 * We have to restore various SPRs that may have been in use at the
997 * time of the critical interrupt.
998 *
999 */
1000#ifdef CONFIG_40x
1001#define PPC_40x_TURN_OFF_MSR_DR						    \
1002	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1003	 * assume the instructions here are mapped by a pinned TLB entry */ \
1004	li	r10,MSR_IR;						    \
1005	mtmsr	r10;							    \
1006	isync;								    \
1007	tophys(r1, r1);
1008#else
1009#define PPC_40x_TURN_OFF_MSR_DR
1010#endif
1011
1012#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1013	REST_NVGPRS(r1);						\
1014	lwz	r3,_MSR(r1);						\
1015	andi.	r3,r3,MSR_PR;						\
1016	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1017	bne	user_exc_return;					\
1018	lwz	r0,GPR0(r1);						\
1019	lwz	r2,GPR2(r1);						\
1020	REST_4GPRS(3, r1);						\
1021	REST_2GPRS(7, r1);						\
1022	lwz	r10,_XER(r1);						\
1023	lwz	r11,_CTR(r1);						\
1024	mtspr	SPRN_XER,r10;						\
1025	mtctr	r11;							\
1026	PPC405_ERR77(0,r1);						\
1027	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1028	lwz	r11,_LINK(r1);						\
1029	mtlr	r11;							\
1030	lwz	r10,_CCR(r1);						\
1031	mtcrf	0xff,r10;						\
1032	PPC_40x_TURN_OFF_MSR_DR;					\
1033	lwz	r9,_DEAR(r1);						\
1034	lwz	r10,_ESR(r1);						\
1035	mtspr	SPRN_DEAR,r9;						\
1036	mtspr	SPRN_ESR,r10;						\
1037	lwz	r11,_NIP(r1);						\
1038	lwz	r12,_MSR(r1);						\
1039	mtspr	exc_lvl_srr0,r11;					\
1040	mtspr	exc_lvl_srr1,r12;					\
1041	lwz	r9,GPR9(r1);						\
1042	lwz	r12,GPR12(r1);						\
1043	lwz	r10,GPR10(r1);						\
1044	lwz	r11,GPR11(r1);						\
1045	lwz	r1,GPR1(r1);						\
1046	PPC405_ERR77_SYNC;						\
1047	exc_lvl_rfi;							\
1048	b	.;		/* prevent prefetch past exc_lvl_rfi */
1049
1050#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1051	lwz	r9,_##exc_lvl_srr0(r1);					\
1052	lwz	r10,_##exc_lvl_srr1(r1);				\
1053	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1054	mtspr	SPRN_##exc_lvl_srr1,r10;
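/*
 * The ## pasting above means e.g. RESTORE_xSRR(CSRR0,CSRR1) reloads the
 * _CSRR0/_CSRR1 slots of the frame into SPRN_CSRR0/SPRN_CSRR1.
 */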
1055
1056#if defined(CONFIG_PPC_BOOK3E_MMU)
1057#ifdef CONFIG_PHYS_64BIT
1058#define	RESTORE_MAS7							\
1059	lwz	r11,MAS7(r1);						\
1060	mtspr	SPRN_MAS7,r11;
1061#else
1062#define	RESTORE_MAS7
1063#endif /* CONFIG_PHYS_64BIT */
1064#define RESTORE_MMU_REGS						\
1065	lwz	r9,MAS0(r1);						\
1066	lwz	r10,MAS1(r1);						\
1067	lwz	r11,MAS2(r1);						\
1068	mtspr	SPRN_MAS0,r9;						\
1069	lwz	r9,MAS3(r1);						\
1070	mtspr	SPRN_MAS1,r10;						\
1071	lwz	r10,MAS6(r1);						\
1072	mtspr	SPRN_MAS2,r11;						\
1073	mtspr	SPRN_MAS3,r9;						\
1074	mtspr	SPRN_MAS6,r10;						\
1075	RESTORE_MAS7;
1076#elif defined(CONFIG_44x)
1077#define RESTORE_MMU_REGS						\
1078	lwz	r9,MMUCR(r1);						\
1079	mtspr	SPRN_MMUCR,r9;
1080#else
1081#define RESTORE_MMU_REGS
1082#endif
1083
1084#ifdef CONFIG_40x
1085	.globl	ret_from_crit_exc
1086ret_from_crit_exc:
1087	mfspr	r9,SPRN_SPRG_THREAD
1088	lis	r10,saved_ksp_limit@ha;
1089	lwz	r10,saved_ksp_limit@l(r10);
1090	tovirt(r9,r9);
1091	stw	r10,KSP_LIMIT(r9)
1092	lis	r9,crit_srr0@ha;
1093	lwz	r9,crit_srr0@l(r9);
1094	lis	r10,crit_srr1@ha;
1095	lwz	r10,crit_srr1@l(r10);
1096	mtspr	SPRN_SRR0,r9;
1097	mtspr	SPRN_SRR1,r10;
1098	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1099#endif /* CONFIG_40x */
1100
1101#ifdef CONFIG_BOOKE
1102	.globl	ret_from_crit_exc
1103ret_from_crit_exc:
1104	mfspr	r9,SPRN_SPRG_THREAD
1105	lwz	r10,SAVED_KSP_LIMIT(r1)
1106	stw	r10,KSP_LIMIT(r9)
1107	RESTORE_xSRR(SRR0,SRR1);
1108	RESTORE_MMU_REGS;
1109	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1110
1111	.globl	ret_from_debug_exc
1112ret_from_debug_exc:
1113	mfspr	r9,SPRN_SPRG_THREAD
1114	lwz	r10,SAVED_KSP_LIMIT(r1)
1115	stw	r10,KSP_LIMIT(r9)
1116	lwz	r9,THREAD_INFO-THREAD(r9)
1117	CURRENT_THREAD_INFO(r10, r1)
1118	lwz	r10,TI_PREEMPT(r10)
1119	stw	r10,TI_PREEMPT(r9)
1120	RESTORE_xSRR(SRR0,SRR1);
1121	RESTORE_xSRR(CSRR0,CSRR1);
1122	RESTORE_MMU_REGS;
1123	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1124
1125	.globl	ret_from_mcheck_exc
1126ret_from_mcheck_exc:
1127	mfspr	r9,SPRN_SPRG_THREAD
1128	lwz	r10,SAVED_KSP_LIMIT(r1)
1129	stw	r10,KSP_LIMIT(r9)
1130	RESTORE_xSRR(SRR0,SRR1);
1131	RESTORE_xSRR(CSRR0,CSRR1);
1132	RESTORE_xSRR(DSRR0,DSRR1);
1133	RESTORE_MMU_REGS;
1134	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1135#endif /* CONFIG_BOOKE */
1136
1137/*
1138 * Load the DBCR0 value for a task that is being ptraced,
1139 * having first saved away the global DBCR0.  Note that r0
1140 * has the dbcr0 value to set upon entry to this.
1141 */
1142load_dbcr0:
1143	mfmsr	r10		/* first disable debug exceptions */
1144	rlwinm	r10,r10,0,~MSR_DE
1145	mtmsr	r10
1146	isync
1147	mfspr	r10,SPRN_DBCR0
1148	lis	r11,global_dbcr0@ha
1149	addi	r11,r11,global_dbcr0@l
1150#ifdef CONFIG_SMP
1151	CURRENT_THREAD_INFO(r9, r1)
1152	lwz	r9,TI_CPU(r9)
1153	slwi	r9,r9,3
1154	add	r11,r11,r9
1155#endif
1156	stw	r10,0(r11)
1157	mtspr	SPRN_DBCR0,r0
1158	lwz	r10,4(r11)
1159	addi	r10,r10,1
1160	stw	r10,4(r11)
1161	li	r11,-1
1162	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1163	blr
1164
1165	.section .bss
1166	.align	4
1167global_dbcr0:
1168	.space	8*NR_CPUS
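	/* 8 bytes per CPU: the saved DBCR0 value and a count that
	 * load_dbcr0 increments and transfer_to_handler decrements */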
1169	.previous
1170#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1171
1172do_work:			/* r10 contains MSR_KERNEL here */
1173	andi.	r0,r9,_TIF_NEED_RESCHED
1174	beq	do_user_signal
1175
1176do_resched:			/* r10 contains MSR_KERNEL here */
1177	/* Note: We don't need to inform lockdep that we are enabling
1178	 * interrupts here. As far as it knows, they are already enabled
1179	 */
1180	ori	r10,r10,MSR_EE
1181	SYNC
1182	MTMSRD(r10)		/* hard-enable interrupts */
1183	bl	schedule
1184recheck:
1185	/* Note: we don't tell lockdep that we are disabling them again
1186	 * either. The disable/enable cycles used to peek at
1187	 * TI_FLAGS aren't advertised.
1188	 */
1189	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1190	SYNC
1191	MTMSRD(r10)		/* disable interrupts */
1192	CURRENT_THREAD_INFO(r9, r1)
1193	lwz	r9,TI_FLAGS(r9)
1194	andi.	r0,r9,_TIF_NEED_RESCHED
1195	bne-	do_resched
1196	andi.	r0,r9,_TIF_USER_WORK_MASK
1197	beq	restore_user
1198do_user_signal:			/* r10 contains MSR_KERNEL here */
1199	ori	r10,r10,MSR_EE
1200	SYNC
1201	MTMSRD(r10)		/* hard-enable interrupts */
1202	/* save r13-r31 in the exception frame, if not already done */
1203	lwz	r3,_TRAP(r1)
1204	andi.	r0,r3,1
1205	beq	2f
1206	SAVE_NVGPRS(r1)
1207	rlwinm	r3,r3,0,0,30
1208	stw	r3,_TRAP(r1)
12092:	addi	r3,r1,STACK_FRAME_OVERHEAD
1210	mr	r4,r9
1211	bl	do_notify_resume
1212	REST_NVGPRS(r1)
1213	b	recheck
1214
1215/*
1216 * We come here when we are at the end of handling an exception
1217 * that occurred at a place where taking an exception will lose
1218 * state information, such as the contents of SRR0 and SRR1.
1219 */
1220nonrecoverable:
1221	lis	r10,exc_exit_restart_end@ha
1222	addi	r10,r10,exc_exit_restart_end@l
1223	cmplw	r12,r10
1224	bge	3f
1225	lis	r11,exc_exit_restart@ha
1226	addi	r11,r11,exc_exit_restart@l
1227	cmplw	r12,r11
1228	blt	3f
1229	lis	r10,ee_restarts@ha
1230	lwz	r12,ee_restarts@l(r10)
1231	addi	r12,r12,1
1232	stw	r12,ee_restarts@l(r10)
1233	mr	r12,r11		/* restart at exc_exit_restart */
1234	blr
12353:	/* OK, we can't recover, kill this process */
1236	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1237BEGIN_FTR_SECTION
1238	blr
1239END_FTR_SECTION_IFSET(CPU_FTR_601)
1240	lwz	r3,_TRAP(r1)
1241	andi.	r0,r3,1
1242	beq	4f
1243	SAVE_NVGPRS(r1)
1244	rlwinm	r3,r3,0,0,30
1245	stw	r3,_TRAP(r1)
12464:	addi	r3,r1,STACK_FRAME_OVERHEAD
1247	bl	nonrecoverable_exception
1248	/* shouldn't return */
1249	b	4b
1250
1251	.section .bss
1252	.align	2
1253ee_restarts:
1254	.space	4
1255	.previous
1256
1257/*
1258 * PROM code for specific machines follows.  Put it
1259 * here so it's easy to add arch-specific sections later.
1260 * -- Cort
1261 */
1262#ifdef CONFIG_PPC_RTAS
1263/*
1264 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1265 * called with the MMU off.
1266 */
1267_GLOBAL(enter_rtas)
1268	stwu	r1,-INT_FRAME_SIZE(r1)
1269	mflr	r0
1270	stw	r0,INT_FRAME_SIZE+4(r1)
1271	LOAD_REG_ADDR(r4, rtas)
1272	lis	r6,1f@ha	/* physical return address for rtas */
1273	addi	r6,r6,1f@l
1274	tophys(r6,r6)
1275	tophys(r7,r1)
1276	lwz	r8,RTASENTRY(r4)
1277	lwz	r4,RTASBASE(r4)
1278	mfmsr	r9
1279	stw	r9,8(r1)
1280	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1281	SYNC			/* disable interrupts so SRR0/1 */
1282	MTMSRD(r0)		/* don't get trashed */
1283	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
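	/* the MSR value above has IR/DR cleared: RTAS must be entered with
	 * the MMU off, as the header comment says */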
1284	mtlr	r6
1285	mtspr	SPRN_SPRG_RTAS,r7
1286	mtspr	SPRN_SRR0,r8
1287	mtspr	SPRN_SRR1,r9
1288	RFI
12891:	tophys(r9,r1)
1290	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1291	lwz	r9,8(r9)	/* original msr value */
1292	FIX_SRR1(r9,r0)
1293	addi	r1,r1,INT_FRAME_SIZE
1294	li	r0,0
1295	mtspr	SPRN_SPRG_RTAS,r0
1296	mtspr	SPRN_SRR0,r8
1297	mtspr	SPRN_SRR1,r9
1298	RFI			/* return to caller */
1299
1300	.globl	machine_check_in_rtas
1301machine_check_in_rtas:
1302	twi	31,0,0
1303	/* XXX load up BATs and panic */
1304
1305#endif /* CONFIG_PPC_RTAS */
1306
1307#ifdef CONFIG_FUNCTION_TRACER
1308#ifdef CONFIG_DYNAMIC_FTRACE
1309_GLOBAL(mcount)
1310_GLOBAL(_mcount)
1311	/*
1312	 * It is required that _mcount on PPC32 preserve the link
1313	 * register, but we have r0 to play with. We use r0 to move
1314	 * the return address (back into the caller of mcount) into
1315	 * the ctr register, restore the link register, and then
1316	 * jump back using the ctr register.
1317	 */
1318	mflr	r0
1319	mtctr	r0
1320	lwz	r0, 4(r1)
1321	mtlr	r0
1322	bctr
1323
1324_GLOBAL(ftrace_caller)
1325	MCOUNT_SAVE_FRAME
1326	/* r3 ends up with link register */
1327	subi	r3, r3, MCOUNT_INSN_SIZE
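	/* back r3 up by one instruction so it points at the call site
	 * rather than the return address */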
1328.globl ftrace_call
1329ftrace_call:
1330	bl	ftrace_stub
1331	nop
1332#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1333.globl ftrace_graph_call
1334ftrace_graph_call:
1335	b	ftrace_graph_stub
1336_GLOBAL(ftrace_graph_stub)
1337#endif
1338	MCOUNT_RESTORE_FRAME
1339	/* old link register ends up in ctr reg */
1340	bctr
1341#else
1342_GLOBAL(mcount)
1343_GLOBAL(_mcount)
1344
1345	MCOUNT_SAVE_FRAME
1346
1347	subi	r3, r3, MCOUNT_INSN_SIZE
1348	LOAD_REG_ADDR(r5, ftrace_trace_function)
1349	lwz	r5,0(r5)
1350
1351	mtctr	r5
1352	bctrl
1353	nop
1354
1355#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1356	b	ftrace_graph_caller
1357#endif
1358	MCOUNT_RESTORE_FRAME
1359	bctr
1360#endif
1361
1362_GLOBAL(ftrace_stub)
1363	blr
1364
1365#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1366_GLOBAL(ftrace_graph_caller)
1367	/* load r4 with local address */
1368	lwz	r4, 44(r1)
1369	subi	r4, r4, MCOUNT_INSN_SIZE
1370
1371	/* Grab the LR out of the caller stack frame */
1372	lwz	r3,52(r1)
1373
1374	bl	prepare_ftrace_return
1375	nop
1376
1377        /*
1378         * prepare_ftrace_return gives us the address we divert to.
1379         * Change the LR in the caller's stack frame to this.
1380         */
1381	stw	r3,52(r1)
1382
1383	MCOUNT_RESTORE_FRAME
1384	/* old link register ends up in ctr reg */
1385	bctr
1386
1387_GLOBAL(return_to_handler)
1388	/* need to save return values */
1389	stwu	r1, -32(r1)
1390	stw	r3, 20(r1)
1391	stw	r4, 16(r1)
1392	stw	r31, 12(r1)
1393	mr	r31, r1
1394
1395	bl	ftrace_return_to_handler
1396	nop
1397
1398	/* return value has real return address */
1399	mtlr	r3
1400
1401	lwz	r3, 20(r1)
1402	lwz	r4, 16(r1)
1403	lwz	r31,12(r1)
1404	lwz	r1, 0(r1)
1405
1406	/* Jump back to real return address */
1407	blr
1408#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1409
1410#endif /* CONFIG_FUNCTION_TRACER */
1411