xref: /linux/arch/powerpc/kernel/entry_32.S (revision 071bf69a0220253a44acb8b2a27f7a262b9a46bf)
1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34#include <asm/ftrace.h>
35#include <asm/ptrace.h>
36
37/*
38 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
39 */
40#if MSR_KERNEL >= 0x10000
41#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
42#else
43#define LOAD_MSR_KERNEL(r, x)	li r,(x)
44#endif
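/*
 * Illustrative expansion of the macro above (a sketch, not generated
 * output): on 4xx/Book-E, LOAD_MSR_KERNEL(r10,MSR_KERNEL) becomes
 *
 *	lis	r10,(MSR_KERNEL)@h
 *	ori	r10,r10,(MSR_KERNEL)@l
 *
 * while on other 32-bit platforms, where MSR_KERNEL fits in the li
 * immediate, it is simply
 *
 *	li	r10,(MSR_KERNEL)
 */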
45
46#ifdef CONFIG_BOOKE
47	.globl	mcheck_transfer_to_handler
48mcheck_transfer_to_handler:
49	mfspr	r0,SPRN_DSRR0
50	stw	r0,_DSRR0(r11)
51	mfspr	r0,SPRN_DSRR1
52	stw	r0,_DSRR1(r11)
53	/* fall through */
54
55	.globl	debug_transfer_to_handler
56debug_transfer_to_handler:
57	mfspr	r0,SPRN_CSRR0
58	stw	r0,_CSRR0(r11)
59	mfspr	r0,SPRN_CSRR1
60	stw	r0,_CSRR1(r11)
61	/* fall through */
62
63	.globl	crit_transfer_to_handler
64crit_transfer_to_handler:
65#ifdef CONFIG_PPC_BOOK3E_MMU
66	mfspr	r0,SPRN_MAS0
67	stw	r0,MAS0(r11)
68	mfspr	r0,SPRN_MAS1
69	stw	r0,MAS1(r11)
70	mfspr	r0,SPRN_MAS2
71	stw	r0,MAS2(r11)
72	mfspr	r0,SPRN_MAS3
73	stw	r0,MAS3(r11)
74	mfspr	r0,SPRN_MAS6
75	stw	r0,MAS6(r11)
76#ifdef CONFIG_PHYS_64BIT
77	mfspr	r0,SPRN_MAS7
78	stw	r0,MAS7(r11)
79#endif /* CONFIG_PHYS_64BIT */
80#endif /* CONFIG_PPC_BOOK3E_MMU */
81#ifdef CONFIG_44x
82	mfspr	r0,SPRN_MMUCR
83	stw	r0,MMUCR(r11)
84#endif
85	mfspr	r0,SPRN_SRR0
86	stw	r0,_SRR0(r11)
87	mfspr	r0,SPRN_SRR1
88	stw	r0,_SRR1(r11)
89
90	/* set the stack limit to the current stack,
91	 * preserving the offset that protects the
92	 * thread_info struct
93	 */
94	mfspr	r8,SPRN_SPRG_THREAD
95	lwz	r0,KSP_LIMIT(r8)
96	stw	r0,SAVED_KSP_LIMIT(r11)
97	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
98	stw	r0,KSP_LIMIT(r8)
99	/* fall through */
100#endif
101
102#ifdef CONFIG_40x
103	.globl	crit_transfer_to_handler
104crit_transfer_to_handler:
105	lwz	r0,crit_r10@l(0)
106	stw	r0,GPR10(r11)
107	lwz	r0,crit_r11@l(0)
108	stw	r0,GPR11(r11)
109	mfspr	r0,SPRN_SRR0
110	stw	r0,crit_srr0@l(0)
111	mfspr	r0,SPRN_SRR1
112	stw	r0,crit_srr1@l(0)
113
114	/* set the stack limit to the current stack,
115	 * preserving the offset that protects the
116	 * thread_info struct
117	 */
118	mfspr	r8,SPRN_SPRG_THREAD
119	lwz	r0,KSP_LIMIT(r8)
120	stw	r0,saved_ksp_limit@l(0)
121	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
122	stw	r0,KSP_LIMIT(r8)
123	/* fall through */
124#endif
125
126/*
127 * This code finishes saving the registers to the exception frame
128 * and jumps to the appropriate handler for the exception, turning
129 * on address translation.
130 * Note that we rely on the caller having set cr0.eq iff the exception
131 * occurred in kernel mode (i.e. MSR:PR = 0).
132 */
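/*
 * A sketch of the call site this entry expects (as laid out by the
 * EXC_XFER_* prolog macros; names here are illustrative): r10 holds
 * the MSR to run the handler with, and the two words following the
 * branch give the handler's virtual address and the place to return
 * to, which transfer_to_handler_cont reads back via mflr/lwz:
 *
 *	li	r10,MSR_KERNEL
 *	bl	transfer_to_handler
 *	.long	handler
 *	.long	ret_from_except
 */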
133	.globl	transfer_to_handler_full
134transfer_to_handler_full:
135	SAVE_NVGPRS(r11)
136	/* fall through */
137
138	.globl	transfer_to_handler
139transfer_to_handler:
140	stw	r2,GPR2(r11)
141	stw	r12,_NIP(r11)
142	stw	r9,_MSR(r11)
143	andi.	r2,r9,MSR_PR
144	mfctr	r12
145	mfspr	r2,SPRN_XER
146	stw	r12,_CTR(r11)
147	stw	r2,_XER(r11)
148	mfspr	r12,SPRN_SPRG_THREAD
149	addi	r2,r12,-THREAD
150	tovirt(r2,r2)			/* set r2 to current */
151	beq	2f			/* if from user, fix up THREAD.regs */
152	addi	r11,r1,STACK_FRAME_OVERHEAD
153	stw	r11,PT_REGS(r12)
154#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
155	/* Check to see if the dbcr0 register is set up to debug.  Use the
156	   internal debug mode bit to do this. */
157	lwz	r12,THREAD_DBCR0(r12)
158	andis.	r12,r12,DBCR0_IDM@h
159	beq+	3f
160	/* From user and task is ptraced - load up global dbcr0 */
161	li	r12,-1			/* clear all pending debug events */
162	mtspr	SPRN_DBSR,r12
163	lis	r11,global_dbcr0@ha
164	tophys(r11,r11)
165	addi	r11,r11,global_dbcr0@l
166#ifdef CONFIG_SMP
167	CURRENT_THREAD_INFO(r9, r1)
168	lwz	r9,TI_CPU(r9)
169	slwi	r9,r9,3
170	add	r11,r11,r9
171#endif
172	lwz	r12,0(r11)
173	mtspr	SPRN_DBCR0,r12
174	lwz	r12,4(r11)
175	addi	r12,r12,-1
176	stw	r12,4(r11)
177#endif
178#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
179	CURRENT_THREAD_INFO(r9, r1)
180	tophys(r9, r9)
181	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
182#endif
183
184	b	3f
185
1862:	/* if from kernel, check interrupted DOZE/NAP mode and
187         * check for stack overflow
188         */
189	lwz	r9,KSP_LIMIT(r12)
190	cmplw	r1,r9			/* if r1 <= ksp_limit */
191	ble-	stack_ovf		/* then the kernel stack overflowed */
1925:
193#if defined(CONFIG_6xx) || defined(CONFIG_E500)
194	CURRENT_THREAD_INFO(r9, r1)
195	tophys(r9,r9)			/* check local flags */
196	lwz	r12,TI_LOCAL_FLAGS(r9)
197	mtcrf	0x01,r12
198	bt-	31-TLF_NAPPING,4f
199	bt-	31-TLF_SLEEPING,7f
200#endif /* CONFIG_6xx || CONFIG_E500 */
201	.globl transfer_to_handler_cont
202transfer_to_handler_cont:
2033:
204	mflr	r9
205	lwz	r11,0(r9)		/* virtual address of handler */
206	lwz	r9,4(r9)		/* where to go when done */
207#ifdef CONFIG_TRACE_IRQFLAGS
208	lis	r12,reenable_mmu@h
209	ori	r12,r12,reenable_mmu@l
210	mtspr	SPRN_SRR0,r12
211	mtspr	SPRN_SRR1,r10
212	SYNC
213	RFI
214reenable_mmu:				/* re-enable mmu so we can */
215	mfmsr	r10
216	lwz	r12,_MSR(r1)
217	xor	r10,r10,r12
218	andi.	r10,r10,MSR_EE		/* Did EE change? */
219	beq	1f
220
221	/*
222	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
223	 * If we came from user mode there is only one stack frame on the
224	 * stack, and accessing CALLER_ADDR1 will cause an oops. So we need
225	 * to create a dummy stack frame to make trace_hardirqs_off happy.
226	 *
227	 * This is handy because we also need to save a bunch of GPRs,
228	 * r3 can be different from GPR3(r1) at this point, r9 and r11
229	 * contain the old MSR and handler address respectively,
230	 * r4 & r5 can contain page fault arguments that need to be passed
231	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
232	 * they aren't useful past this point (aren't syscall arguments),
233	 * the rest is restored from the exception frame.
234	 */
235	stwu	r1,-32(r1)
236	stw	r9,8(r1)
237	stw	r11,12(r1)
238	stw	r3,16(r1)
239	stw	r4,20(r1)
240	stw	r5,24(r1)
241	bl	trace_hardirqs_off
242	lwz	r5,24(r1)
243	lwz	r4,20(r1)
244	lwz	r3,16(r1)
245	lwz	r11,12(r1)
246	lwz	r9,8(r1)
247	addi	r1,r1,32
248	lwz	r0,GPR0(r1)
249	lwz	r6,GPR6(r1)
250	lwz	r7,GPR7(r1)
251	lwz	r8,GPR8(r1)
2521:	mtctr	r11
253	mtlr	r9
254	bctr				/* jump to handler */
255#else /* CONFIG_TRACE_IRQFLAGS */
256	mtspr	SPRN_SRR0,r11
257	mtspr	SPRN_SRR1,r10
258	mtlr	r9
259	SYNC
260	RFI				/* jump to handler, enable MMU */
261#endif /* CONFIG_TRACE_IRQFLAGS */
262
263#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2644:	rlwinm	r12,r12,0,~_TLF_NAPPING
265	stw	r12,TI_LOCAL_FLAGS(r9)
266	b	power_save_ppc32_restore
267
2687:	rlwinm	r12,r12,0,~_TLF_SLEEPING
269	stw	r12,TI_LOCAL_FLAGS(r9)
270	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
271	rlwinm	r9,r9,0,~MSR_EE
272	lwz	r12,_LINK(r11)		/* and return to address in LR */
273	b	fast_exception_return
274#endif
275
276/*
277 * On kernel stack overflow, load up an initial stack pointer
278 * and call StackOverflow(regs), which should not return.
279 */
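/*
 * C-side counterpart assumed by the branch below (a sketch; the real
 * definition lives in arch/powerpc/kernel/traps.c):
 *
 *	void StackOverflow(struct pt_regs *regs);
 */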
280stack_ovf:
281	/* sometimes we use a statically-allocated stack, which is OK. */
282	lis	r12,_end@h
283	ori	r12,r12,_end@l
284	cmplw	r1,r12
285	ble	5b			/* r1 <= &_end is OK */
286	SAVE_NVGPRS(r11)
287	addi	r3,r1,STACK_FRAME_OVERHEAD
288	lis	r1,init_thread_union@ha
289	addi	r1,r1,init_thread_union@l
290	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
291	lis	r9,StackOverflow@ha
292	addi	r9,r9,StackOverflow@l
293	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
294	FIX_SRR1(r10,r12)
295	mtspr	SPRN_SRR0,r9
296	mtspr	SPRN_SRR1,r10
297	SYNC
298	RFI
299
300/*
301 * Handle a system call.
302 */
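/*
 * Register convention used throughout this path (a summary of what the
 * code below relies on, not a formal ABI statement): r0 holds the
 * syscall number used to index sys_call_table, r3-r8 carry the
 * arguments, and the result comes back to userspace in r3, with cr0.SO
 * set and r3 negated to a positive errno on failure (see
 * ret_from_syscall).
 */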
303	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
304	.stabs	"entry_32.S",N_SO,0,0,0f
3050:
306
307_GLOBAL(DoSyscall)
308	stw	r3,ORIG_GPR3(r1)
309	li	r12,0
310	stw	r12,RESULT(r1)
311	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
312	rlwinm	r11,r11,0,4,2
313	stw	r11,_CCR(r1)
314#ifdef CONFIG_TRACE_IRQFLAGS
315	/* Returning from a syscall can (and generally will) hard-enable
316	 * interrupts. You aren't supposed to enter a syscall with
317	 * interrupts disabled in the first place. However, to ensure
318	 * that we get it right vs. lockdep if it happens, we force
319	 * that hard enable here, with appropriate tracing, if we see
320	 * that we have been called with interrupts off.
321	 */
322	mfmsr	r11
323	andi.	r12,r11,MSR_EE
324	bne+	1f
325	/* We came in with interrupts disabled, we enable them now */
326	bl	trace_hardirqs_on
327	mfmsr	r11
328	lwz	r0,GPR0(r1)
329	lwz	r3,GPR3(r1)
330	lwz	r4,GPR4(r1)
331	ori	r11,r11,MSR_EE
332	lwz	r5,GPR5(r1)
333	lwz	r6,GPR6(r1)
334	lwz	r7,GPR7(r1)
335	lwz	r8,GPR8(r1)
336	mtmsr	r11
3371:
338#endif /* CONFIG_TRACE_IRQFLAGS */
339	CURRENT_THREAD_INFO(r10, r1)
340	lwz	r11,TI_FLAGS(r10)
341	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
342	bne-	syscall_dotrace
343syscall_dotrace_cont:
344	cmplwi	0,r0,NR_syscalls
345	lis	r10,sys_call_table@h
346	ori	r10,r10,sys_call_table@l
347	slwi	r0,r0,2
348	bge-	66f
349	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
350	mtlr	r10
351	addi	r9,r1,STACK_FRAME_OVERHEAD
352	PPC440EP_ERR42
353	blrl			/* Call handler */
354	.globl	ret_from_syscall
355ret_from_syscall:
356	mr	r6,r3
357	CURRENT_THREAD_INFO(r12, r1)
358	/* disable interrupts so current_thread_info()->flags can't change */
359	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
360	/* Note: We don't bother telling lockdep about it */
361	SYNC
362	MTMSRD(r10)
363	lwz	r9,TI_FLAGS(r12)
364	li	r8,-MAX_ERRNO
365	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
366	bne-	syscall_exit_work
367	cmplw	0,r3,r8
368	blt+	syscall_exit_cont
369	lwz	r11,_CCR(r1)			/* Load CR */
370	neg	r3,r3
371	oris	r11,r11,0x1000	/* Set SO bit in CR */
372	stw	r11,_CCR(r1)
373syscall_exit_cont:
374	lwz	r8,_MSR(r1)
375#ifdef CONFIG_TRACE_IRQFLAGS
376	/* If we are going to return from the syscall with interrupts
377	 * off, we trace that here. It shouldn't happen, but we want to
378	 * catch the bugger if it does, right?
379	 */
380	andi.	r10,r8,MSR_EE
381	bne+	1f
382	stw	r3,GPR3(r1)
383	bl      trace_hardirqs_off
384	lwz	r3,GPR3(r1)
3851:
386#endif /* CONFIG_TRACE_IRQFLAGS */
387#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
388	/* If the process has its own DBCR0 value, load it up.  The internal
389	   debug mode bit tells us that dbcr0 should be loaded. */
390	lwz	r0,THREAD+THREAD_DBCR0(r2)
391	andis.	r10,r0,DBCR0_IDM@h
392	bnel-	load_dbcr0
393#endif
394#ifdef CONFIG_44x
395BEGIN_MMU_FTR_SECTION
396	lis	r4,icache_44x_need_flush@ha
397	lwz	r5,icache_44x_need_flush@l(r4)
398	cmplwi	cr0,r5,0
399	bne-	2f
4001:
401END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
402#endif /* CONFIG_44x */
403BEGIN_FTR_SECTION
404	lwarx	r7,0,r1
405END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
406	stwcx.	r0,0,r1			/* to clear the reservation */
407#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
408	andi.	r4,r8,MSR_PR
409	beq	3f
410	CURRENT_THREAD_INFO(r4, r1)
411	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
4123:
413#endif
414	lwz	r4,_LINK(r1)
415	lwz	r5,_CCR(r1)
416	mtlr	r4
417	mtcr	r5
418	lwz	r7,_NIP(r1)
419	FIX_SRR1(r8, r0)
420	lwz	r2,GPR2(r1)
421	lwz	r1,GPR1(r1)
422	mtspr	SPRN_SRR0,r7
423	mtspr	SPRN_SRR1,r8
424	SYNC
425	RFI
426#ifdef CONFIG_44x
4272:	li	r7,0
428	iccci	r0,r0
429	stw	r7,icache_44x_need_flush@l(r4)
430	b	1b
431#endif  /* CONFIG_44x */
432
43366:	li	r3,-ENOSYS
434	b	ret_from_syscall
435
436	.globl	ret_from_fork
437ret_from_fork:
438	REST_NVGPRS(r1)
439	bl	schedule_tail
440	li	r3,0
441	b	ret_from_syscall
442
443	.globl	ret_from_kernel_thread
444ret_from_kernel_thread:
445	REST_NVGPRS(r1)
446	bl	schedule_tail
447	mtlr	r14
448	mr	r3,r15
449	PPC440EP_ERR42
450	blrl
451	li	r3,0
452	b	ret_from_syscall
453
454/* Traced system call support */
455syscall_dotrace:
456	SAVE_NVGPRS(r1)
457	li	r0,0xc00
458	stw	r0,_TRAP(r1)
459	addi	r3,r1,STACK_FRAME_OVERHEAD
460	bl	do_syscall_trace_enter
461	/*
462	 * Restore argument registers possibly just changed.
463	 * We use the return value of do_syscall_trace_enter
464	 * as the syscall number (r0) to look up in the table.
465	 */
466	mr	r0,r3
467	lwz	r3,GPR3(r1)
468	lwz	r4,GPR4(r1)
469	lwz	r5,GPR5(r1)
470	lwz	r6,GPR6(r1)
471	lwz	r7,GPR7(r1)
472	lwz	r8,GPR8(r1)
473	REST_NVGPRS(r1)
474
475	cmplwi	r0,NR_syscalls
476	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
477	bge-	ret_from_syscall
478	b	syscall_dotrace_cont
479
480syscall_exit_work:
481	andi.	r0,r9,_TIF_RESTOREALL
482	beq+	0f
483	REST_NVGPRS(r1)
484	b	2f
4850:	cmplw	0,r3,r8
486	blt+	1f
487	andi.	r0,r9,_TIF_NOERROR
488	bne-	1f
489	lwz	r11,_CCR(r1)			/* Load CR */
490	neg	r3,r3
491	oris	r11,r11,0x1000	/* Set SO bit in CR */
492	stw	r11,_CCR(r1)
493
4941:	stw	r6,RESULT(r1)	/* Save result */
495	stw	r3,GPR3(r1)	/* Update return value */
4962:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
497	beq	4f
498
499	/* Clear per-syscall TIF flags if any are set.  */
500
501	li	r11,_TIF_PERSYSCALL_MASK
502	addi	r12,r12,TI_FLAGS
5033:	lwarx	r8,0,r12
504	andc	r8,r8,r11
505#ifdef CONFIG_IBM405_ERR77
506	dcbt	0,r12
507#endif
508	stwcx.	r8,0,r12
509	bne-	3b
510	subi	r12,r12,TI_FLAGS
511
5124:	/* Anything which requires enabling interrupts? */
513	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
514	beq	ret_from_except
515
516	/* Re-enable interrupts. There is no need to trace that with
517	 * lockdep as we are supposed to have IRQs on at this point
518	 */
519	ori	r10,r10,MSR_EE
520	SYNC
521	MTMSRD(r10)
522
523	/* Save NVGPRS if they're not saved already */
524	lwz	r4,_TRAP(r1)
525	andi.	r4,r4,1
526	beq	5f
527	SAVE_NVGPRS(r1)
528	li	r4,0xc00
529	stw	r4,_TRAP(r1)
5305:
531	addi	r3,r1,STACK_FRAME_OVERHEAD
532	bl	do_syscall_trace_leave
533	b	ret_from_except_full
534
535/*
536 * The fork/clone functions need to copy the full register set into
537 * the child process. Therefore we need to save all the nonvolatile
538 * registers (r13 - r31) before calling the C code.
539 */
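/*
 * The low bit of the saved trap value records whether the frame holds
 * the full register set; clearing it below is what marks the frame as
 * complete.  Roughly, in illustrative C matching the FULL_REGS() test
 * in asm/ptrace.h:
 *
 *	full_regs_saved = ((regs->trap & 1) == 0);
 */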
540	.globl	ppc_fork
541ppc_fork:
542	SAVE_NVGPRS(r1)
543	lwz	r0,_TRAP(r1)
544	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
545	stw	r0,_TRAP(r1)		/* register set saved */
546	b	sys_fork
547
548	.globl	ppc_vfork
549ppc_vfork:
550	SAVE_NVGPRS(r1)
551	lwz	r0,_TRAP(r1)
552	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
553	stw	r0,_TRAP(r1)		/* register set saved */
554	b	sys_vfork
555
556	.globl	ppc_clone
557ppc_clone:
558	SAVE_NVGPRS(r1)
559	lwz	r0,_TRAP(r1)
560	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
561	stw	r0,_TRAP(r1)		/* register set saved */
562	b	sys_clone
563
564	.globl	ppc_swapcontext
565ppc_swapcontext:
566	SAVE_NVGPRS(r1)
567	lwz	r0,_TRAP(r1)
568	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
569	stw	r0,_TRAP(r1)		/* register set saved */
570	b	sys_swapcontext
571
572/*
573 * Top-level page fault handling.
574 * This is in assembler because if do_page_fault tells us that
575 * it is a bad kernel page fault, we want to save the non-volatile
576 * registers before calling bad_page_fault.
577 */
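/*
 * C-side signatures assumed by this glue (a sketch for reference; see
 * arch/powerpc/mm/fault.c):
 *
 *	int  do_page_fault(struct pt_regs *regs, unsigned long address,
 *			   unsigned long error_code);
 *	void bad_page_fault(struct pt_regs *regs, unsigned long address,
 *			    int sig);
 *
 * A non-zero return from do_page_fault is treated as the signal number
 * to hand to bad_page_fault for a bad kernel-mode fault.
 */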
578	.globl	handle_page_fault
579handle_page_fault:
580	stw	r4,_DAR(r1)
581	addi	r3,r1,STACK_FRAME_OVERHEAD
582	bl	do_page_fault
583	cmpwi	r3,0
584	beq+	ret_from_except
585	SAVE_NVGPRS(r1)
586	lwz	r0,_TRAP(r1)
587	clrrwi	r0,r0,1
588	stw	r0,_TRAP(r1)
589	mr	r5,r3
590	addi	r3,r1,STACK_FRAME_OVERHEAD
591	lwz	r4,_DAR(r1)
592	bl	bad_page_fault
593	b	ret_from_except_full
594
595/*
596 * This routine switches between two different tasks.  The process
597 * state of one is saved on its kernel stack.  Then the state
598 * of the other is restored from its kernel stack.  The memory
599 * management hardware is updated to the second process's state.
600 * Finally, we can return to the second process.
601 * On entry, r3 points to the THREAD for the current task, r4
602 * points to the THREAD for the new task.
603 *
604 * This routine is always called with interrupts disabled.
605 *
606 * Note: there are two ways to get to the "going out" portion
607 * of this code; either by coming in via the entry (_switch)
608 * or via "fork" which must set up an environment equivalent
609 * to the "_switch" path.  If you change this, you'll have to
610 * change the fork code also.
611 *
612 * The code which creates the new task context is in 'copy_thread'
613 * in arch/powerpc/kernel/process.c
614 */
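/*
 * Seen from C, this is roughly (a sketch; the extern declaration lives
 * with the caller in process.c):
 *
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 *
 * r3/r4 are the two thread_structs and the old 'current' comes back in
 * r3 as the return value.
 */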
615_GLOBAL(_switch)
616	stwu	r1,-INT_FRAME_SIZE(r1)
617	mflr	r0
618	stw	r0,INT_FRAME_SIZE+4(r1)
619	/* r3-r12 are caller saved -- Cort */
620	SAVE_NVGPRS(r1)
621	stw	r0,_NIP(r1)	/* Return to switch caller */
622	mfmsr	r11
623	li	r0,MSR_FP	/* Disable floating-point */
624#ifdef CONFIG_ALTIVEC
625BEGIN_FTR_SECTION
626	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
627	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
628	stw	r12,THREAD+THREAD_VRSAVE(r2)
629END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
630#endif /* CONFIG_ALTIVEC */
631#ifdef CONFIG_SPE
632BEGIN_FTR_SECTION
633	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
634	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
635	stw	r12,THREAD+THREAD_SPEFSCR(r2)
636END_FTR_SECTION_IFSET(CPU_FTR_SPE)
637#endif /* CONFIG_SPE */
638	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
639	beq+	1f
640	andc	r11,r11,r0
641	MTMSRD(r11)
642	isync
6431:	stw	r11,_MSR(r1)
644	mfcr	r10
645	stw	r10,_CCR(r1)
646	stw	r1,KSP(r3)	/* Set old stack pointer */
647
648#ifdef CONFIG_SMP
649	/* We need a sync somewhere here to make sure that if the
650	 * previous task gets rescheduled on another CPU, it sees all
651	 * stores it has performed on this one.
652	 */
653	sync
654#endif /* CONFIG_SMP */
655
656	tophys(r0,r4)
657	CLR_TOP32(r0)
658	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
659	lwz	r1,KSP(r4)	/* Load new stack pointer */
660
661	/* save the old current 'last' for return value */
662	mr	r3,r2
663	addi	r2,r4,-THREAD	/* Update current */
664
665#ifdef CONFIG_ALTIVEC
666BEGIN_FTR_SECTION
667	lwz	r0,THREAD+THREAD_VRSAVE(r2)
668	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
669END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
670#endif /* CONFIG_ALTIVEC */
671#ifdef CONFIG_SPE
672BEGIN_FTR_SECTION
673	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
674	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
675END_FTR_SECTION_IFSET(CPU_FTR_SPE)
676#endif /* CONFIG_SPE */
677
678	lwz	r0,_CCR(r1)
679	mtcrf	0xFF,r0
680	/* r3-r12 are destroyed -- Cort */
681	REST_NVGPRS(r1)
682
683	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
684	mtlr	r4
685	addi	r1,r1,INT_FRAME_SIZE
686	blr
687
688	.globl	fast_exception_return
689fast_exception_return:
690#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
691	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
692	beq	1f			/* if not, we've got problems */
693#endif
694
6952:	REST_4GPRS(3, r11)
696	lwz	r10,_CCR(r11)
697	REST_GPR(1, r11)
698	mtcr	r10
699	lwz	r10,_LINK(r11)
700	mtlr	r10
701	REST_GPR(10, r11)
702	mtspr	SPRN_SRR1,r9
703	mtspr	SPRN_SRR0,r12
704	REST_GPR(9, r11)
705	REST_GPR(12, r11)
706	lwz	r11,GPR11(r11)
707	SYNC
708	RFI
709
710#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
711/* check if the exception happened in a restartable section */
7121:	lis	r3,exc_exit_restart_end@ha
713	addi	r3,r3,exc_exit_restart_end@l
714	cmplw	r12,r3
715	bge	3f
716	lis	r4,exc_exit_restart@ha
717	addi	r4,r4,exc_exit_restart@l
718	cmplw	r12,r4
719	blt	3f
720	lis	r3,fee_restarts@ha
721	tophys(r3,r3)
722	lwz	r5,fee_restarts@l(r3)
723	addi	r5,r5,1
724	stw	r5,fee_restarts@l(r3)
725	mr	r12,r4		/* restart at exc_exit_restart */
726	b	2b
727
728	.section .bss
729	.align	2
730fee_restarts:
731	.space	4
732	.previous
733
734/* aargh, a nonrecoverable interrupt, panic */
735/* aargh, we don't know which trap this is */
736/* but the 601 doesn't implement the RI bit, so assume it's OK */
7373:
738BEGIN_FTR_SECTION
739	b	2b
740END_FTR_SECTION_IFSET(CPU_FTR_601)
741	li	r10,-1
742	stw	r10,_TRAP(r11)
743	addi	r3,r1,STACK_FRAME_OVERHEAD
744	lis	r10,MSR_KERNEL@h
745	ori	r10,r10,MSR_KERNEL@l
746	bl	transfer_to_handler_full
747	.long	nonrecoverable_exception
748	.long	ret_from_except
749#endif
750
751	.globl	ret_from_except_full
752ret_from_except_full:
753	REST_NVGPRS(r1)
754	/* fall through */
755
756	.globl	ret_from_except
757ret_from_except:
758	/* Hard-disable interrupts so that current_thread_info()->flags
759	 * can't change between when we test it and when we return
760	 * from the interrupt. */
761	/* Note: We don't bother telling lockdep about it */
762	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
763	SYNC			/* Some chip revs have problems here... */
764	MTMSRD(r10)		/* disable interrupts */
765
766	lwz	r3,_MSR(r1)	/* Returning to user mode? */
767	andi.	r0,r3,MSR_PR
768	beq	resume_kernel
769
770user_exc_return:		/* r10 contains MSR_KERNEL here */
771	/* Check current_thread_info()->flags */
772	CURRENT_THREAD_INFO(r9, r1)
773	lwz	r9,TI_FLAGS(r9)
774	andi.	r0,r9,_TIF_USER_WORK_MASK
775	bne	do_work
776
777restore_user:
778#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
779	/* Check whether this process has its own DBCR0 value.  The internal
780	   debug mode bit tells us that dbcr0 should be loaded. */
781	lwz	r0,THREAD+THREAD_DBCR0(r2)
782	andis.	r10,r0,DBCR0_IDM@h
783	bnel-	load_dbcr0
784#endif
785#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
786	CURRENT_THREAD_INFO(r9, r1)
787	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
788#endif
789
790	b	restore
791
792/* N.B. the only way to get here is from the beq following ret_from_except. */
793resume_kernel:
794	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
795	CURRENT_THREAD_INFO(r9, r1)
796	lwz	r8,TI_FLAGS(r9)
797	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
798	beq+	1f
799
800	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
801
802	lwz	r3,GPR1(r1)
803	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
804	mr	r4,r1			/* src:  current exception frame */
805	mr	r1,r3			/* Reroute the trampoline frame to r1 */
806
807	/* Copy from the original to the trampoline. */
808	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
809	li	r6,0			/* start offset: 0 */
810	mtctr	r5
8112:	lwzx	r0,r6,r4
812	stwx	r0,r6,r3
813	addi	r6,r6,4
814	bdnz	2b
815
816	/* Do real store operation to complete stwu */
817	lwz	r5,GPR1(r1)
818	stw	r8,0(r5)
819
820	/* Clear _TIF_EMULATE_STACK_STORE flag */
821	lis	r11,_TIF_EMULATE_STACK_STORE@h
822	addi	r5,r9,TI_FLAGS
8230:	lwarx	r8,0,r5
824	andc	r8,r8,r11
825#ifdef CONFIG_IBM405_ERR77
826	dcbt	0,r5
827#endif
828	stwcx.	r8,0,r5
829	bne-	0b
8301:
831
832#ifdef CONFIG_PREEMPT
833	/* check current_thread_info->preempt_count */
834	lwz	r0,TI_PREEMPT(r9)
835	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
836	bne	restore
837	andi.	r8,r8,_TIF_NEED_RESCHED
838	beq+	restore
839	lwz	r3,_MSR(r1)
840	andi.	r0,r3,MSR_EE	/* interrupts off? */
841	beq	restore		/* don't schedule if so */
842#ifdef CONFIG_TRACE_IRQFLAGS
843	/* Lockdep thinks irqs are enabled, we need to call
844	 * preempt_schedule_irq with IRQs off, so we inform lockdep
845	 * now that we -did- turn them off already
846	 */
847	bl	trace_hardirqs_off
848#endif
8491:	bl	preempt_schedule_irq
850	CURRENT_THREAD_INFO(r9, r1)
851	lwz	r3,TI_FLAGS(r9)
852	andi.	r0,r3,_TIF_NEED_RESCHED
853	bne-	1b
854#ifdef CONFIG_TRACE_IRQFLAGS
855	/* And now, to properly rebalance the above, we tell lockdep they
856	 * are being turned back on, which will happen when we return
857	 */
858	bl	trace_hardirqs_on
859#endif
860#endif /* CONFIG_PREEMPT */
861
862	/* interrupts are hard-disabled at this point */
863restore:
864#ifdef CONFIG_44x
865BEGIN_MMU_FTR_SECTION
866	b	1f
867END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
868	lis	r4,icache_44x_need_flush@ha
869	lwz	r5,icache_44x_need_flush@l(r4)
870	cmplwi	cr0,r5,0
871	beq+	1f
872	li	r6,0
873	iccci	r0,r0
874	stw	r6,icache_44x_need_flush@l(r4)
8751:
876#endif  /* CONFIG_44x */
877
878	lwz	r9,_MSR(r1)
879#ifdef CONFIG_TRACE_IRQFLAGS
880	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
881	 * off in this assembly code while peeking at TI_FLAGS() and such. However
882	 * we need to inform it if the exception turned interrupts off, and we
883	 * are about to turn them back on.
884	 *
885	 * The problem here sadly is that we don't know whether the exception was
886	 * one that turned interrupts off or not. So we always tell lockdep about
887	 * turning them on here when we go back to wherever we came from with EE
888	 * on, even if that may mean some redundant calls being tracked. Maybe
889	 * later we could encode what the exception did somewhere or test the
890	 * exception type in pt_regs, but that sounds like overkill.
891	 */
892	andi.	r10,r9,MSR_EE
893	beq	1f
894	/*
895	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
896	 * which is the stack frame here, we need to force a stack frame
897	 * in case we came from user space.
898	 */
899	stwu	r1,-32(r1)
900	mflr	r0
901	stw	r0,4(r1)
902	stwu	r1,-32(r1)
903	bl	trace_hardirqs_on
904	lwz	r1,0(r1)
905	lwz	r1,0(r1)
906	lwz	r9,_MSR(r1)
9071:
908#endif /* CONFIG_TRACE_IRQFLAGS */
909
910	lwz	r0,GPR0(r1)
911	lwz	r2,GPR2(r1)
912	REST_4GPRS(3, r1)
913	REST_2GPRS(7, r1)
914
915	lwz	r10,_XER(r1)
916	lwz	r11,_CTR(r1)
917	mtspr	SPRN_XER,r10
918	mtctr	r11
919
920	PPC405_ERR77(0,r1)
921BEGIN_FTR_SECTION
922	lwarx	r11,0,r1
923END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
924	stwcx.	r0,0,r1			/* to clear the reservation */
925
926#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
927	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
928	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
929
930	lwz	r10,_CCR(r1)
931	lwz	r11,_LINK(r1)
932	mtcrf	0xFF,r10
933	mtlr	r11
934
935	/*
936	 * Once we put values in SRR0 and SRR1, we are in a state
937	 * where exceptions are not recoverable, since taking an
938	 * exception will trash SRR0 and SRR1.  Therefore we clear the
939	 * MSR:RI bit to indicate this.  If we do take an exception,
940	 * we can't return to the point of the exception but we
941	 * can restart the exception exit path at the label
942	 * exc_exit_restart below.  -- paulus
943	 */
944	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
945	SYNC
946	MTMSRD(r10)		/* clear the RI bit */
947	.globl exc_exit_restart
948exc_exit_restart:
949	lwz	r12,_NIP(r1)
950	FIX_SRR1(r9,r10)
951	mtspr	SPRN_SRR0,r12
952	mtspr	SPRN_SRR1,r9
953	REST_4GPRS(9, r1)
954	lwz	r1,GPR1(r1)
955	.globl exc_exit_restart_end
956exc_exit_restart_end:
957	SYNC
958	RFI
959
960#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
961	/*
962	 * This is a bit different on 4xx/Book-E because it doesn't have
963	 * the RI bit in the MSR.
964	 * The TLB miss handler checks if we have interrupted
965	 * the exception exit path and restarts it if so
966	 * (well maybe one day it will... :).
967	 */
968	lwz	r11,_LINK(r1)
969	mtlr	r11
970	lwz	r10,_CCR(r1)
971	mtcrf	0xff,r10
972	REST_2GPRS(9, r1)
973	.globl exc_exit_restart
974exc_exit_restart:
975	lwz	r11,_NIP(r1)
976	lwz	r12,_MSR(r1)
977exc_exit_start:
978	mtspr	SPRN_SRR0,r11
979	mtspr	SPRN_SRR1,r12
980	REST_2GPRS(11, r1)
981	lwz	r1,GPR1(r1)
982	.globl exc_exit_restart_end
983exc_exit_restart_end:
984	PPC405_ERR77_SYNC
985	rfi
986	b	.			/* prevent prefetch past rfi */
987
988/*
989 * Returning from a critical interrupt in user mode doesn't need
990 * to be any different from a normal exception.  For a critical
991 * interrupt in the kernel, we just return (without checking for
992 * preemption) since the interrupt may have happened at some crucial
993 * place (e.g. inside the TLB miss handler), and because we will be
994 * running with r1 pointing into critical_stack, not the current
995 * process's kernel stack (and therefore current_thread_info() will
996 * give the wrong answer).
997 * We have to restore various SPRs that may have been in use at the
998 * time of the critical interrupt.
999 *
1000 */
1001#ifdef CONFIG_40x
1002#define PPC_40x_TURN_OFF_MSR_DR						    \
1003	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1004	 * assume the instructions here are mapped by a pinned TLB entry */ \
1005	li	r10,MSR_IR;						    \
1006	mtmsr	r10;							    \
1007	isync;								    \
1008	tophys(r1, r1);
1009#else
1010#define PPC_40x_TURN_OFF_MSR_DR
1011#endif
1012
1013#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1014	REST_NVGPRS(r1);						\
1015	lwz	r3,_MSR(r1);						\
1016	andi.	r3,r3,MSR_PR;						\
1017	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1018	bne	user_exc_return;					\
1019	lwz	r0,GPR0(r1);						\
1020	lwz	r2,GPR2(r1);						\
1021	REST_4GPRS(3, r1);						\
1022	REST_2GPRS(7, r1);						\
1023	lwz	r10,_XER(r1);						\
1024	lwz	r11,_CTR(r1);						\
1025	mtspr	SPRN_XER,r10;						\
1026	mtctr	r11;							\
1027	PPC405_ERR77(0,r1);						\
1028	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1029	lwz	r11,_LINK(r1);						\
1030	mtlr	r11;							\
1031	lwz	r10,_CCR(r1);						\
1032	mtcrf	0xff,r10;						\
1033	PPC_40x_TURN_OFF_MSR_DR;					\
1034	lwz	r9,_DEAR(r1);						\
1035	lwz	r10,_ESR(r1);						\
1036	mtspr	SPRN_DEAR,r9;						\
1037	mtspr	SPRN_ESR,r10;						\
1038	lwz	r11,_NIP(r1);						\
1039	lwz	r12,_MSR(r1);						\
1040	mtspr	exc_lvl_srr0,r11;					\
1041	mtspr	exc_lvl_srr1,r12;					\
1042	lwz	r9,GPR9(r1);						\
1043	lwz	r12,GPR12(r1);						\
1044	lwz	r10,GPR10(r1);						\
1045	lwz	r11,GPR11(r1);						\
1046	lwz	r1,GPR1(r1);						\
1047	PPC405_ERR77_SYNC;						\
1048	exc_lvl_rfi;							\
1049	b	.;		/* prevent prefetch past exc_lvl_rfi */
1050
1051#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1052	lwz	r9,_##exc_lvl_srr0(r1);					\
1053	lwz	r10,_##exc_lvl_srr1(r1);				\
1054	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1055	mtspr	SPRN_##exc_lvl_srr1,r10;
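/*
 * Illustrative expansion: RESTORE_xSRR(CSRR0,CSRR1) becomes
 *
 *	lwz	r9,_CSRR0(r1)
 *	lwz	r10,_CSRR1(r1)
 *	mtspr	SPRN_CSRR0,r9
 *	mtspr	SPRN_CSRR1,r10
 */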
1056
1057#if defined(CONFIG_PPC_BOOK3E_MMU)
1058#ifdef CONFIG_PHYS_64BIT
1059#define	RESTORE_MAS7							\
1060	lwz	r11,MAS7(r1);						\
1061	mtspr	SPRN_MAS7,r11;
1062#else
1063#define	RESTORE_MAS7
1064#endif /* CONFIG_PHYS_64BIT */
1065#define RESTORE_MMU_REGS						\
1066	lwz	r9,MAS0(r1);						\
1067	lwz	r10,MAS1(r1);						\
1068	lwz	r11,MAS2(r1);						\
1069	mtspr	SPRN_MAS0,r9;						\
1070	lwz	r9,MAS3(r1);						\
1071	mtspr	SPRN_MAS1,r10;						\
1072	lwz	r10,MAS6(r1);						\
1073	mtspr	SPRN_MAS2,r11;						\
1074	mtspr	SPRN_MAS3,r9;						\
1075	mtspr	SPRN_MAS6,r10;						\
1076	RESTORE_MAS7;
1077#elif defined(CONFIG_44x)
1078#define RESTORE_MMU_REGS						\
1079	lwz	r9,MMUCR(r1);						\
1080	mtspr	SPRN_MMUCR,r9;
1081#else
1082#define RESTORE_MMU_REGS
1083#endif
1084
1085#ifdef CONFIG_40x
1086	.globl	ret_from_crit_exc
1087ret_from_crit_exc:
1088	mfspr	r9,SPRN_SPRG_THREAD
1089	lis	r10,saved_ksp_limit@ha;
1090	lwz	r10,saved_ksp_limit@l(r10);
1091	tovirt(r9,r9);
1092	stw	r10,KSP_LIMIT(r9)
1093	lis	r9,crit_srr0@ha;
1094	lwz	r9,crit_srr0@l(r9);
1095	lis	r10,crit_srr1@ha;
1096	lwz	r10,crit_srr1@l(r10);
1097	mtspr	SPRN_SRR0,r9;
1098	mtspr	SPRN_SRR1,r10;
1099	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1100#endif /* CONFIG_40x */
1101
1102#ifdef CONFIG_BOOKE
1103	.globl	ret_from_crit_exc
1104ret_from_crit_exc:
1105	mfspr	r9,SPRN_SPRG_THREAD
1106	lwz	r10,SAVED_KSP_LIMIT(r1)
1107	stw	r10,KSP_LIMIT(r9)
1108	RESTORE_xSRR(SRR0,SRR1);
1109	RESTORE_MMU_REGS;
1110	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1111
1112	.globl	ret_from_debug_exc
1113ret_from_debug_exc:
1114	mfspr	r9,SPRN_SPRG_THREAD
1115	lwz	r10,SAVED_KSP_LIMIT(r1)
1116	stw	r10,KSP_LIMIT(r9)
1117	lwz	r9,THREAD_INFO-THREAD(r9)
1118	CURRENT_THREAD_INFO(r10, r1)
1119	lwz	r10,TI_PREEMPT(r10)
1120	stw	r10,TI_PREEMPT(r9)
1121	RESTORE_xSRR(SRR0,SRR1);
1122	RESTORE_xSRR(CSRR0,CSRR1);
1123	RESTORE_MMU_REGS;
1124	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1125
1126	.globl	ret_from_mcheck_exc
1127ret_from_mcheck_exc:
1128	mfspr	r9,SPRN_SPRG_THREAD
1129	lwz	r10,SAVED_KSP_LIMIT(r1)
1130	stw	r10,KSP_LIMIT(r9)
1131	RESTORE_xSRR(SRR0,SRR1);
1132	RESTORE_xSRR(CSRR0,CSRR1);
1133	RESTORE_xSRR(DSRR0,DSRR1);
1134	RESTORE_MMU_REGS;
1135	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1136#endif /* CONFIG_BOOKE */
1137
1138/*
1139 * Load the DBCR0 value for a task that is being ptraced,
1140 * having first saved the old DBCR0 value in global_dbcr0.  Note
1141 * that r0 holds the dbcr0 value to set on entry to this routine.
1142 */
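/*
 * Layout of the global_dbcr0 save area used here and on exception
 * entry in transfer_to_handler (8 bytes per CPU): word 0 holds the
 * saved DBCR0 value, word 1 is a use count that is incremented below
 * and decremented when the saved value is reloaded.
 */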
1143load_dbcr0:
1144	mfmsr	r10		/* first disable debug exceptions */
1145	rlwinm	r10,r10,0,~MSR_DE
1146	mtmsr	r10
1147	isync
1148	mfspr	r10,SPRN_DBCR0
1149	lis	r11,global_dbcr0@ha
1150	addi	r11,r11,global_dbcr0@l
1151#ifdef CONFIG_SMP
1152	CURRENT_THREAD_INFO(r9, r1)
1153	lwz	r9,TI_CPU(r9)
1154	slwi	r9,r9,3
1155	add	r11,r11,r9
1156#endif
1157	stw	r10,0(r11)
1158	mtspr	SPRN_DBCR0,r0
1159	lwz	r10,4(r11)
1160	addi	r10,r10,1
1161	stw	r10,4(r11)
1162	li	r11,-1
1163	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1164	blr
1165
1166	.section .bss
1167	.align	4
1168global_dbcr0:
1169	.space	8*NR_CPUS
1170	.previous
1171#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1172
1173do_work:			/* r10 contains MSR_KERNEL here */
1174	andi.	r0,r9,_TIF_NEED_RESCHED
1175	beq	do_user_signal
1176
1177do_resched:			/* r10 contains MSR_KERNEL here */
1178	/* Note: We don't need to inform lockdep that we are enabling
1179	 * interrupts here. As far as it knows, they are already enabled
1180	 */
1181	ori	r10,r10,MSR_EE
1182	SYNC
1183	MTMSRD(r10)		/* hard-enable interrupts */
1184	bl	schedule
1185recheck:
1186	/* Note: And we don't tell it that we are disabling them again
1187	 * either. Those disable/enable cycles used to peek at
1188	 * TI_FLAGS aren't advertised.
1189	 */
1190	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1191	SYNC
1192	MTMSRD(r10)		/* disable interrupts */
1193	CURRENT_THREAD_INFO(r9, r1)
1194	lwz	r9,TI_FLAGS(r9)
1195	andi.	r0,r9,_TIF_NEED_RESCHED
1196	bne-	do_resched
1197	andi.	r0,r9,_TIF_USER_WORK_MASK
1198	beq	restore_user
1199do_user_signal:			/* r10 contains MSR_KERNEL here */
1200	ori	r10,r10,MSR_EE
1201	SYNC
1202	MTMSRD(r10)		/* hard-enable interrupts */
1203	/* save r13-r31 in the exception frame, if not already done */
1204	lwz	r3,_TRAP(r1)
1205	andi.	r0,r3,1
1206	beq	2f
1207	SAVE_NVGPRS(r1)
1208	rlwinm	r3,r3,0,0,30
1209	stw	r3,_TRAP(r1)
12102:	addi	r3,r1,STACK_FRAME_OVERHEAD
1211	mr	r4,r9
1212	bl	do_notify_resume
1213	REST_NVGPRS(r1)
1214	b	recheck
1215
1216/*
1217 * We come here when we are at the end of handling an exception
1218 * that occurred at a place where taking an exception will lose
1219 * state information, such as the contents of SRR0 and SRR1.
1220 */
1221nonrecoverable:
1222	lis	r10,exc_exit_restart_end@ha
1223	addi	r10,r10,exc_exit_restart_end@l
1224	cmplw	r12,r10
1225	bge	3f
1226	lis	r11,exc_exit_restart@ha
1227	addi	r11,r11,exc_exit_restart@l
1228	cmplw	r12,r11
1229	blt	3f
1230	lis	r10,ee_restarts@ha
1231	lwz	r12,ee_restarts@l(r10)
1232	addi	r12,r12,1
1233	stw	r12,ee_restarts@l(r10)
1234	mr	r12,r11		/* restart at exc_exit_restart */
1235	blr
12363:	/* OK, we can't recover, kill this process */
1237	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1238BEGIN_FTR_SECTION
1239	blr
1240END_FTR_SECTION_IFSET(CPU_FTR_601)
1241	lwz	r3,_TRAP(r1)
1242	andi.	r0,r3,1
1243	beq	4f
1244	SAVE_NVGPRS(r1)
1245	rlwinm	r3,r3,0,0,30
1246	stw	r3,_TRAP(r1)
12474:	addi	r3,r1,STACK_FRAME_OVERHEAD
1248	bl	nonrecoverable_exception
1249	/* shouldn't return */
1250	b	4b
1251
1252	.section .bss
1253	.align	2
1254ee_restarts:
1255	.space	4
1256	.previous
1257
1258/*
1259 * PROM code for specific machines follows.  Put it
1260 * here so it's easy to add arch-specific sections later.
1261 * -- Cort
1262 */
1263#ifdef CONFIG_PPC_RTAS
1264/*
1265 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1266 * called with the MMU off.
1267 */
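/*
 * Calling convention assumed here (a sketch): r3 carries the physical
 * address of the RTAS argument block and is passed through untouched
 * to the RTAS entry point; the rtas descriptor read via RTASENTRY and
 * RTASBASE below supplies the entry address and base, and the RFI
 * drops into real mode (MSR_IR/MSR_DR cleared) as RTAS requires.
 */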
1268_GLOBAL(enter_rtas)
1269	stwu	r1,-INT_FRAME_SIZE(r1)
1270	mflr	r0
1271	stw	r0,INT_FRAME_SIZE+4(r1)
1272	LOAD_REG_ADDR(r4, rtas)
1273	lis	r6,1f@ha	/* physical return address for rtas */
1274	addi	r6,r6,1f@l
1275	tophys(r6,r6)
1276	tophys(r7,r1)
1277	lwz	r8,RTASENTRY(r4)
1278	lwz	r4,RTASBASE(r4)
1279	mfmsr	r9
1280	stw	r9,8(r1)
1281	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1282	SYNC			/* disable interrupts so SRR0/1 */
1283	MTMSRD(r0)		/* don't get trashed */
1284	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1285	mtlr	r6
1286	mtspr	SPRN_SPRG_RTAS,r7
1287	mtspr	SPRN_SRR0,r8
1288	mtspr	SPRN_SRR1,r9
1289	RFI
12901:	tophys(r9,r1)
1291	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1292	lwz	r9,8(r9)	/* original msr value */
1293	FIX_SRR1(r9,r0)
1294	addi	r1,r1,INT_FRAME_SIZE
1295	li	r0,0
1296	mtspr	SPRN_SPRG_RTAS,r0
1297	mtspr	SPRN_SRR0,r8
1298	mtspr	SPRN_SRR1,r9
1299	RFI			/* return to caller */
1300
1301	.globl	machine_check_in_rtas
1302machine_check_in_rtas:
1303	twi	31,0,0
1304	/* XXX load up BATs and panic */
1305
1306#endif /* CONFIG_PPC_RTAS */
1307
1308#ifdef CONFIG_FUNCTION_TRACER
1309#ifdef CONFIG_DYNAMIC_FTRACE
1310_GLOBAL(mcount)
1311_GLOBAL(_mcount)
1312	/*
1313	 * _mcount on PPC32 is required to preserve the link
1314	 * register. But we have r0 to play with. We use r0 to
1315	 * move the return address (back into the caller of mcount)
1316	 * into the ctr register, restore the link register from
1317	 * the stack and then jump back using the ctr register.
1318	 */
1319	mflr	r0
1320	mtctr	r0
1321	lwz	r0, 4(r1)
1322	mtlr	r0
1323	bctr
1324
1325_GLOBAL(ftrace_caller)
1326	MCOUNT_SAVE_FRAME
1327	/* r3 ends up with link register */
1328	subi	r3, r3, MCOUNT_INSN_SIZE
1329.globl ftrace_call
1330ftrace_call:
1331	bl	ftrace_stub
1332	nop
1333#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1334.globl ftrace_graph_call
1335ftrace_graph_call:
1336	b	ftrace_graph_stub
1337_GLOBAL(ftrace_graph_stub)
1338#endif
1339	MCOUNT_RESTORE_FRAME
1340	/* old link register ends up in ctr reg */
1341	bctr
1342#else
1343_GLOBAL(mcount)
1344_GLOBAL(_mcount)
1345
1346	MCOUNT_SAVE_FRAME
1347
1348	subi	r3, r3, MCOUNT_INSN_SIZE
1349	LOAD_REG_ADDR(r5, ftrace_trace_function)
1350	lwz	r5,0(r5)
1351
1352	mtctr	r5
1353	bctrl
1354	nop
1355
1356#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1357	b	ftrace_graph_caller
1358#endif
1359	MCOUNT_RESTORE_FRAME
1360	bctr
1361#endif
1362
1363_GLOBAL(ftrace_stub)
1364	blr
1365
1366#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1367_GLOBAL(ftrace_graph_caller)
1368	/* load r4 with local address */
1369	lwz	r4, 44(r1)
1370	subi	r4, r4, MCOUNT_INSN_SIZE
1371
1372	/* Grab the LR out of the caller stack frame */
1373	lwz	r3,52(r1)
1374
1375	bl	prepare_ftrace_return
1376	nop
1377
1378        /*
1379         * prepare_ftrace_return gives us the address we divert to.
1380         * Change the LR in the caller's stack frame to this.
1381         */
1382	stw	r3,52(r1)
1383
1384	MCOUNT_RESTORE_FRAME
1385	/* old link register ends up in ctr reg */
1386	bctr
1387
1388_GLOBAL(return_to_handler)
1389	/* need to save return values */
1390	stwu	r1, -32(r1)
1391	stw	r3, 20(r1)
1392	stw	r4, 16(r1)
1393	stw	r31, 12(r1)
1394	mr	r31, r1
1395
1396	bl	ftrace_return_to_handler
1397	nop
1398
1399	/* return value has real return address */
1400	mtlr	r3
1401
1402	lwz	r3, 20(r1)
1403	lwz	r4, 16(r1)
1404	lwz	r31,12(r1)
1405	lwz	r1, 0(r1)
1406
1407	/* Jump back to real return address */
1408	blr
1409#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1410
1411#endif /* CONFIG_FUNCTION_TRACER */
1412