xref: /linux/arch/powerpc/kernel/entry_32.S (revision 2c86cd188f8a5631f3d75a1dea14d22df85189b4)
1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34#include <asm/ptrace.h>
35#include <asm/export.h>
36#include <asm/asm-405.h>
37#include <asm/feature-fixups.h>
38
39/*
40 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
41 */
42#if MSR_KERNEL >= 0x10000
43#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
44#else
45#define LOAD_MSR_KERNEL(r, x)	li r,(x)
46#endif
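/*
 * Note: li can only materialise a sign-extended 16-bit immediate, so once
 * MSR_KERNEL has bits set at or above bit 16 (e.g. MSR_CE on 4xx/Book-E)
 * it has to be built with a lis/ori pair instead.
 */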
47
48/*
49 * Align to 4k to ensure that all functions modifying srr0/srr1
50 * fit into one page, so that no TLB miss can occur between the
51 * modification of srr0/srr1 and the associated rfi.
52 */
53	.align	12
54
55#ifdef CONFIG_BOOKE
56	.globl	mcheck_transfer_to_handler
57mcheck_transfer_to_handler:
58	mfspr	r0,SPRN_DSRR0
59	stw	r0,_DSRR0(r11)
60	mfspr	r0,SPRN_DSRR1
61	stw	r0,_DSRR1(r11)
62	/* fall through */
63
64	.globl	debug_transfer_to_handler
65debug_transfer_to_handler:
66	mfspr	r0,SPRN_CSRR0
67	stw	r0,_CSRR0(r11)
68	mfspr	r0,SPRN_CSRR1
69	stw	r0,_CSRR1(r11)
70	/* fall through */
71
72	.globl	crit_transfer_to_handler
73crit_transfer_to_handler:
74#ifdef CONFIG_PPC_BOOK3E_MMU
75	mfspr	r0,SPRN_MAS0
76	stw	r0,MAS0(r11)
77	mfspr	r0,SPRN_MAS1
78	stw	r0,MAS1(r11)
79	mfspr	r0,SPRN_MAS2
80	stw	r0,MAS2(r11)
81	mfspr	r0,SPRN_MAS3
82	stw	r0,MAS3(r11)
83	mfspr	r0,SPRN_MAS6
84	stw	r0,MAS6(r11)
85#ifdef CONFIG_PHYS_64BIT
86	mfspr	r0,SPRN_MAS7
87	stw	r0,MAS7(r11)
88#endif /* CONFIG_PHYS_64BIT */
89#endif /* CONFIG_PPC_BOOK3E_MMU */
90#ifdef CONFIG_44x
91	mfspr	r0,SPRN_MMUCR
92	stw	r0,MMUCR(r11)
93#endif
94	mfspr	r0,SPRN_SRR0
95	stw	r0,_SRR0(r11)
96	mfspr	r0,SPRN_SRR1
97	stw	r0,_SRR1(r11)
98
99	/* save the old stack limit, then set a new limit based on the
100	 * current stack so that the thread_info struct at its base is
101	 * protected
102	 */
103	mfspr	r8,SPRN_SPRG_THREAD
104	lwz	r0,KSP_LIMIT(r8)
105	stw	r0,SAVED_KSP_LIMIT(r11)
106	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
107	stw	r0,KSP_LIMIT(r8)
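	/* the rlwimi above keeps the low THREAD_SHIFT bits of the old limit
	 * but replaces the upper bits with those of r1, rebasing the limit
	 * onto the stack we are actually running on */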
108	/* fall through */
109#endif
110
111#ifdef CONFIG_40x
112	.globl	crit_transfer_to_handler
113crit_transfer_to_handler:
114	lwz	r0,crit_r10@l(0)
115	stw	r0,GPR10(r11)
116	lwz	r0,crit_r11@l(0)
117	stw	r0,GPR11(r11)
118	mfspr	r0,SPRN_SRR0
119	stw	r0,crit_srr0@l(0)
120	mfspr	r0,SPRN_SRR1
121	stw	r0,crit_srr1@l(0)
122
123	/* save the old stack limit, then set a new limit based on the
124	 * current stack so that the thread_info struct at its base is
125	 * protected
126	 */
127	mfspr	r8,SPRN_SPRG_THREAD
128	lwz	r0,KSP_LIMIT(r8)
129	stw	r0,saved_ksp_limit@l(0)
130	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
131	stw	r0,KSP_LIMIT(r8)
132	/* fall through */
133#endif
134
135/*
136 * This code finishes saving the registers to the exception frame
137 * and jumps to the appropriate handler for the exception, turning
138 * on address translation.
139 * Note that we rely on the caller having set cr0.eq iff the exception
140 * occurred in kernel mode (i.e. MSR:PR = 0).
141 */
142	.globl	transfer_to_handler_full
143transfer_to_handler_full:
144	SAVE_NVGPRS(r11)
145	/* fall through */
146
147	.globl	transfer_to_handler
148transfer_to_handler:
149	stw	r2,GPR2(r11)
150	stw	r12,_NIP(r11)
151	stw	r9,_MSR(r11)
152	andi.	r2,r9,MSR_PR
153	mfctr	r12
154	mfspr	r2,SPRN_XER
155	stw	r12,_CTR(r11)
156	stw	r2,_XER(r11)
157	mfspr	r12,SPRN_SPRG_THREAD
158	addi	r2,r12,-THREAD
159	tovirt(r2,r2)			/* set r2 to current */
160	beq	2f			/* if from user, fix up THREAD.regs */
161	addi	r11,r1,STACK_FRAME_OVERHEAD
162	stw	r11,PT_REGS(r12)
163#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
164	/* Check to see if the dbcr0 register is set up to debug.  Use the
165	   internal debug mode bit to do this. */
166	lwz	r12,THREAD_DBCR0(r12)
167	andis.	r12,r12,DBCR0_IDM@h
168	beq+	3f
169	/* From user and task is ptraced - load up global dbcr0 */
170	li	r12,-1			/* clear all pending debug events */
171	mtspr	SPRN_DBSR,r12
172	lis	r11,global_dbcr0@ha
173	tophys(r11,r11)
174	addi	r11,r11,global_dbcr0@l
175#ifdef CONFIG_SMP
176	CURRENT_THREAD_INFO(r9, r1)
177	lwz	r9,TI_CPU(r9)
178	slwi	r9,r9,3
179	add	r11,r11,r9
180#endif
181	lwz	r12,0(r11)
182	mtspr	SPRN_DBCR0,r12
183	lwz	r12,4(r11)
184	addi	r12,r12,-1
185	stw	r12,4(r11)
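	/* global_dbcr0 is an 8-byte per-CPU slot (hence the slwi by 3 above):
	 * word 0 holds the saved DBCR0 value and word 4 a count that
	 * load_dbcr0 increments and this path decrements */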
186#endif
187#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
188	CURRENT_THREAD_INFO(r9, r1)
189	tophys(r9, r9)
190	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
191#endif
192
193	b	3f
194
1952:	/* if from kernel, check interrupted DOZE/NAP mode and
196	 * check for stack overflow
197	 */
198	lwz	r9,KSP_LIMIT(r12)
199	cmplw	r1,r9			/* if r1 <= ksp_limit */
200	ble-	stack_ovf		/* then the kernel stack overflowed */
2015:
202#if defined(CONFIG_6xx) || defined(CONFIG_E500)
203	CURRENT_THREAD_INFO(r9, r1)
204	tophys(r9,r9)			/* check local flags */
205	lwz	r12,TI_LOCAL_FLAGS(r9)
206	mtcrf	0x01,r12
207	bt-	31-TLF_NAPPING,4f
208	bt-	31-TLF_SLEEPING,7f
209#endif /* CONFIG_6xx || CONFIG_E500 */
210	.globl transfer_to_handler_cont
211transfer_to_handler_cont:
2123:
213	mflr	r9
214	lwz	r11,0(r9)		/* virtual address of handler */
215	lwz	r9,4(r9)		/* where to go when done */
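	/* The exception prologs branch here with a bl that is immediately
	 * followed by two inline words (handler address, then return
	 * address), so LR points at that small table rather than at code. */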
216#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
217	mtspr	SPRN_NRI, r0
218#endif
219#ifdef CONFIG_TRACE_IRQFLAGS
220	lis	r12,reenable_mmu@h
221	ori	r12,r12,reenable_mmu@l
222	mtspr	SPRN_SRR0,r12
223	mtspr	SPRN_SRR1,r10
224	SYNC
225	RFI
226reenable_mmu:				/* re-enable mmu so we can */
227	mfmsr	r10
228	lwz	r12,_MSR(r1)
229	xor	r10,r10,r12
230	andi.	r10,r10,MSR_EE		/* Did EE change? */
231	beq	1f
232
233	/*
234	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
235	 * If from user mode there is only one stack frame on the stack, and
236	 * accessing CALLER_ADDR1 will cause an oops. So we need to create a dummy
237	 * stack frame to make trace_hardirqs_off happy.
238	 *
239	 * This is handy because we also need to save a bunch of GPRs,
240	 * r3 can be different from GPR3(r1) at this point, r9 and r11
241	 * contain the old MSR and handler address respectively,
242	 * r4 & r5 can contain page fault arguments that need to be passed
243	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
244	 * they aren't useful past this point (aren't syscall arguments),
245	 * the rest is restored from the exception frame.
246	 */
247	stwu	r1,-32(r1)
248	stw	r9,8(r1)
249	stw	r11,12(r1)
250	stw	r3,16(r1)
251	stw	r4,20(r1)
252	stw	r5,24(r1)
253	bl	trace_hardirqs_off
254	lwz	r5,24(r1)
255	lwz	r4,20(r1)
256	lwz	r3,16(r1)
257	lwz	r11,12(r1)
258	lwz	r9,8(r1)
259	addi	r1,r1,32
260	lwz	r0,GPR0(r1)
261	lwz	r6,GPR6(r1)
262	lwz	r7,GPR7(r1)
263	lwz	r8,GPR8(r1)
2641:	mtctr	r11
265	mtlr	r9
266	bctr				/* jump to handler */
267#else /* CONFIG_TRACE_IRQFLAGS */
268	mtspr	SPRN_SRR0,r11
269	mtspr	SPRN_SRR1,r10
270	mtlr	r9
271	SYNC
272	RFI				/* jump to handler, enable MMU */
273#endif /* CONFIG_TRACE_IRQFLAGS */
274
275#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2764:	rlwinm	r12,r12,0,~_TLF_NAPPING
277	stw	r12,TI_LOCAL_FLAGS(r9)
278	b	power_save_ppc32_restore
279
2807:	rlwinm	r12,r12,0,~_TLF_SLEEPING
281	stw	r12,TI_LOCAL_FLAGS(r9)
282	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
283	rlwinm	r9,r9,0,~MSR_EE
284	lwz	r12,_LINK(r11)		/* and return to address in LR */
285	b	fast_exception_return
286#endif
287
288/*
289 * On kernel stack overflow, load up an initial stack pointer
290 * and call StackOverflow(regs), which should not return.
291 */
292stack_ovf:
293	/* sometimes we use a statically-allocated stack, which is OK. */
294	lis	r12,_end@h
295	ori	r12,r12,_end@l
296	cmplw	r1,r12
297	ble	5b			/* r1 <= &_end is OK */
298	SAVE_NVGPRS(r11)
299	addi	r3,r1,STACK_FRAME_OVERHEAD
300	lis	r1,init_thread_union@ha
301	addi	r1,r1,init_thread_union@l
302	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
303	lis	r9,StackOverflow@ha
304	addi	r9,r9,StackOverflow@l
305	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
306#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
307	mtspr	SPRN_NRI, r0
308#endif
309	mtspr	SPRN_SRR0,r9
310	mtspr	SPRN_SRR1,r10
311	SYNC
312	RFI
313
314/*
315 * Handle a system call.
316 */
317	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
318	.stabs	"entry_32.S",N_SO,0,0,0f
3190:
320
321_GLOBAL(DoSyscall)
322	stw	r3,ORIG_GPR3(r1)
323	li	r12,0
324	stw	r12,RESULT(r1)
325	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
326	rlwinm	r11,r11,0,4,2
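	/* the 4,2 mask wraps around, keeping every bit except bit 3 (CR0[SO]) */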
327	stw	r11,_CCR(r1)
328#ifdef CONFIG_TRACE_IRQFLAGS
329	/* Return from syscalls can (and generally will) hard enable
330	 * interrupts. You aren't supposed to call a syscall with
331	 * interrupts disabled in the first place. However, to ensure
332	 * that we get it right vs. lockdep if it happens, we force
333	 * that hard enable here with appropriate tracing if we see
334	 * that we have been called with interrupts off
335	 */
336	mfmsr	r11
337	andi.	r12,r11,MSR_EE
338	bne+	1f
339	/* We came in with interrupts disabled, we enable them now */
340	bl	trace_hardirqs_on
341	mfmsr	r11
342	lwz	r0,GPR0(r1)
343	lwz	r3,GPR3(r1)
344	lwz	r4,GPR4(r1)
345	ori	r11,r11,MSR_EE
346	lwz	r5,GPR5(r1)
347	lwz	r6,GPR6(r1)
348	lwz	r7,GPR7(r1)
349	lwz	r8,GPR8(r1)
350	mtmsr	r11
3511:
352#endif /* CONFIG_TRACE_IRQFLAGS */
353	CURRENT_THREAD_INFO(r10, r1)
354	lwz	r11,TI_FLAGS(r10)
355	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
356	bne-	syscall_dotrace
357syscall_dotrace_cont:
358	cmplwi	0,r0,NR_syscalls
359	lis	r10,sys_call_table@h
360	ori	r10,r10,sys_call_table@l
361	slwi	r0,r0,2
362	bge-	66f
363	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
364	mtlr	r10
365	addi	r9,r1,STACK_FRAME_OVERHEAD
366	PPC440EP_ERR42
367	blrl			/* Call handler */
368	.globl	ret_from_syscall
369ret_from_syscall:
370#ifdef CONFIG_DEBUG_RSEQ
371	/* Check whether the syscall is issued inside a restartable sequence */
372	stw	r3,GPR3(r1)
373	addi    r3,r1,STACK_FRAME_OVERHEAD
374	bl      rseq_syscall
375	lwz	r3,GPR3(r1)
376#endif
377	mr	r6,r3
378	CURRENT_THREAD_INFO(r12, r1)
379	/* disable interrupts so current_thread_info()->flags can't change */
380	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
381	/* Note: We don't bother telling lockdep about it */
382	SYNC
383	MTMSRD(r10)
384	lwz	r9,TI_FLAGS(r12)
385	li	r8,-MAX_ERRNO
386	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
387	bne-	syscall_exit_work
388	cmplw	0,r3,r8
389	blt+	syscall_exit_cont
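	/* cmplw is unsigned, so only return values in the topmost MAX_ERRNO
	 * range count as errors: for those, set the SO bit in CR0 and
	 * negate r3 into a positive errno */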
390	lwz	r11,_CCR(r1)			/* Load CR */
391	neg	r3,r3
392	oris	r11,r11,0x1000	/* Set SO bit in CR */
393	stw	r11,_CCR(r1)
394syscall_exit_cont:
395	lwz	r8,_MSR(r1)
396#ifdef CONFIG_TRACE_IRQFLAGS
397	/* If we are going to return from the syscall with interrupts
398	 * off, we trace that here. It shouldn't happen, but we
399	 * want to catch the bugger if it does, right?
400	 */
401	andi.	r10,r8,MSR_EE
402	bne+	1f
403	stw	r3,GPR3(r1)
404	bl      trace_hardirqs_off
405	lwz	r3,GPR3(r1)
4061:
407#endif /* CONFIG_TRACE_IRQFLAGS */
408#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
409	/* If the process has its own DBCR0 value, load it up.  The internal
410	   debug mode bit tells us that dbcr0 should be loaded. */
411	lwz	r0,THREAD+THREAD_DBCR0(r2)
412	andis.	r10,r0,DBCR0_IDM@h
413	bnel-	load_dbcr0
414#endif
415#ifdef CONFIG_44x
416BEGIN_MMU_FTR_SECTION
417	lis	r4,icache_44x_need_flush@ha
418	lwz	r5,icache_44x_need_flush@l(r4)
419	cmplwi	cr0,r5,0
420	bne-	2f
4211:
422END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
423#endif /* CONFIG_44x */
424BEGIN_FTR_SECTION
425	lwarx	r7,0,r1
426END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
427	stwcx.	r0,0,r1			/* to clear the reservation */
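	/* the feature section supplies a dummy lwarx first on cores where a
	 * stwcx. must always be paired with a preceding lwarx */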
428#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
429	andi.	r4,r8,MSR_PR
430	beq	3f
431	CURRENT_THREAD_INFO(r4, r1)
432	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
4333:
434#endif
435	lwz	r4,_LINK(r1)
436	lwz	r5,_CCR(r1)
437	mtlr	r4
438	mtcr	r5
439	lwz	r7,_NIP(r1)
440	lwz	r2,GPR2(r1)
441	lwz	r1,GPR1(r1)
442#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
443	mtspr	SPRN_NRI, r0
444#endif
445	mtspr	SPRN_SRR0,r7
446	mtspr	SPRN_SRR1,r8
447	SYNC
448	RFI
449#ifdef CONFIG_44x
4502:	li	r7,0
451	iccci	r0,r0
452	stw	r7,icache_44x_need_flush@l(r4)
453	b	1b
454#endif  /* CONFIG_44x */
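/* 44x only: when icache_44x_need_flush is set, the path above invalidates
 * the instruction cache (iccci) and clears the flag before returning to
 * user mode. */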
455
45666:	li	r3,-ENOSYS
457	b	ret_from_syscall
458
459	.globl	ret_from_fork
460ret_from_fork:
461	REST_NVGPRS(r1)
462	bl	schedule_tail
463	li	r3,0
464	b	ret_from_syscall
465
466	.globl	ret_from_kernel_thread
467ret_from_kernel_thread:
468	REST_NVGPRS(r1)
469	bl	schedule_tail
470	mtlr	r14
471	mr	r3,r15
472	PPC440EP_ERR42
473	blrl
474	li	r3,0
475	b	ret_from_syscall
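/* For kernel threads, copy_thread() parks the thread function in r14 and
 * its argument in r15; the blrl above invokes it, and if it ever returns
 * we drop out through the syscall exit path with a return value of 0. */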
476
477/* Traced system call support */
478syscall_dotrace:
479	SAVE_NVGPRS(r1)
480	li	r0,0xc00
481	stw	r0,_TRAP(r1)
482	addi	r3,r1,STACK_FRAME_OVERHEAD
483	bl	do_syscall_trace_enter
484	/*
485	 * Restore the argument registers, which the tracer may have changed.
486	 * We use the return value of do_syscall_trace_enter
487	 * as the syscall number to look up in the table (r0).
488	 */
489	mr	r0,r3
490	lwz	r3,GPR3(r1)
491	lwz	r4,GPR4(r1)
492	lwz	r5,GPR5(r1)
493	lwz	r6,GPR6(r1)
494	lwz	r7,GPR7(r1)
495	lwz	r8,GPR8(r1)
496	REST_NVGPRS(r1)
497
498	cmplwi	r0,NR_syscalls
499	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
500	bge-	ret_from_syscall
501	b	syscall_dotrace_cont
502
503syscall_exit_work:
504	andi.	r0,r9,_TIF_RESTOREALL
505	beq+	0f
506	REST_NVGPRS(r1)
507	b	2f
5080:	cmplw	0,r3,r8
509	blt+	1f
510	andi.	r0,r9,_TIF_NOERROR
511	bne-	1f
512	lwz	r11,_CCR(r1)			/* Load CR */
513	neg	r3,r3
514	oris	r11,r11,0x1000	/* Set SO bit in CR */
515	stw	r11,_CCR(r1)
516
5171:	stw	r6,RESULT(r1)	/* Save result */
518	stw	r3,GPR3(r1)	/* Update return value */
5192:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
520	beq	4f
521
522	/* Clear per-syscall TIF flags if any are set.  */
523
524	li	r11,_TIF_PERSYSCALL_MASK
525	addi	r12,r12,TI_FLAGS
5263:	lwarx	r8,0,r12
527	andc	r8,r8,r11
528#ifdef CONFIG_IBM405_ERR77
529	dcbt	0,r12
530#endif
531	stwcx.	r8,0,r12
532	bne-	3b
533	subi	r12,r12,TI_FLAGS
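	/* the lwarx/stwcx. loop above clears the per-syscall flags atomically,
	 * since TI_FLAGS can also be updated from interrupt context */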
534
5354:	/* Anything which requires enabling interrupts? */
536	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
537	beq	ret_from_except
538
539	/* Re-enable interrupts. There is no need to trace that with
540	 * lockdep as we are supposed to have IRQs on at this point
541	 */
542	ori	r10,r10,MSR_EE
543	SYNC
544	MTMSRD(r10)
545
546	/* Save NVGPRS if they're not saved already */
547	lwz	r4,_TRAP(r1)
548	andi.	r4,r4,1
549	beq	5f
550	SAVE_NVGPRS(r1)
551	li	r4,0xc00
552	stw	r4,_TRAP(r1)
5535:
554	addi	r3,r1,STACK_FRAME_OVERHEAD
555	bl	do_syscall_trace_leave
556	b	ret_from_except_full
557
558/*
559 * The fork/clone functions need to copy the full register set into
560 * the child process. Therefore we need to save all the nonvolatile
561 * registers (r13 - r31) before calling the C code.
562 */
563	.globl	ppc_fork
564ppc_fork:
565	SAVE_NVGPRS(r1)
566	lwz	r0,_TRAP(r1)
567	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
568	stw	r0,_TRAP(r1)		/* register set saved */
569	b	sys_fork
570
571	.globl	ppc_vfork
572ppc_vfork:
573	SAVE_NVGPRS(r1)
574	lwz	r0,_TRAP(r1)
575	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
576	stw	r0,_TRAP(r1)		/* register set saved */
577	b	sys_vfork
578
579	.globl	ppc_clone
580ppc_clone:
581	SAVE_NVGPRS(r1)
582	lwz	r0,_TRAP(r1)
583	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
584	stw	r0,_TRAP(r1)		/* register set saved */
585	b	sys_clone
586
587	.globl	ppc_swapcontext
588ppc_swapcontext:
589	SAVE_NVGPRS(r1)
590	lwz	r0,_TRAP(r1)
591	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
592	stw	r0,_TRAP(r1)		/* register set saved */
593	b	sys_swapcontext
594
595/*
596 * Top-level page fault handling.
597 * This is in assembler because if do_page_fault tells us that
598 * it is a bad kernel page fault, we want to save the non-volatile
599 * registers before calling bad_page_fault.
600 */
601	.globl	handle_page_fault
602handle_page_fault:
603	stw	r4,_DAR(r1)
604	addi	r3,r1,STACK_FRAME_OVERHEAD
605#ifdef CONFIG_6xx
606	andis.  r0,r5,DSISR_DABRMATCH@h
607	bne-    handle_dabr_fault
608#endif
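	/* On 6xx, r5 holds the DSISR here; a DABR match means a data
	 * breakpoint rather than a fault, so it is routed to do_break
	 * (via handle_dabr_fault) instead of do_page_fault. */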
609	bl	do_page_fault
610	cmpwi	r3,0
611	beq+	ret_from_except
612	SAVE_NVGPRS(r1)
613	lwz	r0,_TRAP(r1)
614	clrrwi	r0,r0,1
615	stw	r0,_TRAP(r1)
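	/* the low bit of _TRAP means only the volatile GPRs were saved;
	 * clearing it records that the full register set is now in the frame */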
616	mr	r5,r3
617	addi	r3,r1,STACK_FRAME_OVERHEAD
618	lwz	r4,_DAR(r1)
619	bl	bad_page_fault
620	b	ret_from_except_full
621
622#ifdef CONFIG_6xx
623	/* We have a data breakpoint exception - handle it */
624handle_dabr_fault:
625	SAVE_NVGPRS(r1)
626	lwz	r0,_TRAP(r1)
627	clrrwi	r0,r0,1
628	stw	r0,_TRAP(r1)
629	bl      do_break
630	b	ret_from_except_full
631#endif
632
633/*
634 * This routine switches between two different tasks.  The process
635 * state of one is saved on its kernel stack.  Then the state
636 * of the other is restored from its kernel stack.  The memory
637 * management hardware is updated to the second process's state.
638 * Finally, we can return to the second process.
639 * On entry, r3 points to the THREAD for the current task, r4
640 * points to the THREAD for the new task.
641 *
642 * This routine is always called with interrupts disabled.
643 *
644 * Note: there are two ways to get to the "going out" portion
645 * of this code; either by coming in via the entry (_switch)
646 * or via "fork" which must set up an environment equivalent
647 * to the "_switch" path.  If you change this, you'll have to
648 * change the fork code also.
649 *
650 * The code which creates the new task context is in 'copy_thread'
651 * in arch/powerpc/kernel/process.c
652 */
653_GLOBAL(_switch)
654	stwu	r1,-INT_FRAME_SIZE(r1)
655	mflr	r0
656	stw	r0,INT_FRAME_SIZE+4(r1)
657	/* r3-r12 are caller saved -- Cort */
658	SAVE_NVGPRS(r1)
659	stw	r0,_NIP(r1)	/* Return to switch caller */
660	mfmsr	r11
661	li	r0,MSR_FP	/* Disable floating-point */
662#ifdef CONFIG_ALTIVEC
663BEGIN_FTR_SECTION
664	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
665	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
666	stw	r12,THREAD+THREAD_VRSAVE(r2)
667END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
668#endif /* CONFIG_ALTIVEC */
669#ifdef CONFIG_SPE
670BEGIN_FTR_SECTION
671	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
672	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
673	stw	r12,THREAD+THREAD_SPEFSCR(r2)
674END_FTR_SECTION_IFSET(CPU_FTR_SPE)
675#endif /* CONFIG_SPE */
676	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
677	beq+	1f
678	andc	r11,r11,r0
679	MTMSRD(r11)
680	isync
6811:	stw	r11,_MSR(r1)
682	mfcr	r10
683	stw	r10,_CCR(r1)
684	stw	r1,KSP(r3)	/* Set old stack pointer */
685
686#ifdef CONFIG_SMP
687	/* We need a sync somewhere here to make sure that if the
688	 * previous task gets rescheduled on another CPU, it sees all
689	 * stores it has performed on this one.
690	 */
691	sync
692#endif /* CONFIG_SMP */
693
694	tophys(r0,r4)
695	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
696	lwz	r1,KSP(r4)	/* Load new stack pointer */
697
698	/* save the old current 'last' for return value */
699	mr	r3,r2
700	addi	r2,r4,-THREAD	/* Update current */
701
702#ifdef CONFIG_ALTIVEC
703BEGIN_FTR_SECTION
704	lwz	r0,THREAD+THREAD_VRSAVE(r2)
705	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
706END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
707#endif /* CONFIG_ALTIVEC */
708#ifdef CONFIG_SPE
709BEGIN_FTR_SECTION
710	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
711	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
712END_FTR_SECTION_IFSET(CPU_FTR_SPE)
713#endif /* CONFIG_SPE */
714
715	lwz	r0,_CCR(r1)
716	mtcrf	0xFF,r0
717	/* r3-r12 are destroyed -- Cort */
718	REST_NVGPRS(r1)
719
720	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
721	mtlr	r4
722	addi	r1,r1,INT_FRAME_SIZE
723	blr
724
725	.globl	fast_exception_return
726fast_exception_return:
727#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
728	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
729	beq	1f			/* if not, we've got problems */
730#endif
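	/* If MSR_RI was clear, the interrupt hit while SRR0/SRR1 were live;
	 * the code at 1: below checks whether that happened inside the
	 * restartable exception-exit sequence and restarts it if so. */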
731
7322:	REST_4GPRS(3, r11)
733	lwz	r10,_CCR(r11)
734	REST_GPR(1, r11)
735	mtcr	r10
736	lwz	r10,_LINK(r11)
737	mtlr	r10
738	REST_GPR(10, r11)
739#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
740	mtspr	SPRN_NRI, r0
741#endif
742	mtspr	SPRN_SRR1,r9
743	mtspr	SPRN_SRR0,r12
744	REST_GPR(9, r11)
745	REST_GPR(12, r11)
746	lwz	r11,GPR11(r11)
747	SYNC
748	RFI
749
750#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
751/* check if the exception happened in a restartable section */
7521:	lis	r3,exc_exit_restart_end@ha
753	addi	r3,r3,exc_exit_restart_end@l
754	cmplw	r12,r3
755	bge	3f
756	lis	r4,exc_exit_restart@ha
757	addi	r4,r4,exc_exit_restart@l
758	cmplw	r12,r4
759	blt	3f
760	lis	r3,fee_restarts@ha
761	tophys(r3,r3)
762	lwz	r5,fee_restarts@l(r3)
763	addi	r5,r5,1
764	stw	r5,fee_restarts@l(r3)
765	mr	r12,r4		/* restart at exc_exit_restart */
766	b	2b
767
768	.section .bss
769	.align	2
770fee_restarts:
771	.space	4
772	.previous
773
774/* aargh, a nonrecoverable interrupt, panic */
775/* aargh, we don't know which trap this is */
776/* but the 601 doesn't implement the RI bit, so assume it's OK */
7773:
778BEGIN_FTR_SECTION
779	b	2b
780END_FTR_SECTION_IFSET(CPU_FTR_601)
781	li	r10,-1
782	stw	r10,_TRAP(r11)
783	addi	r3,r1,STACK_FRAME_OVERHEAD
784	lis	r10,MSR_KERNEL@h
785	ori	r10,r10,MSR_KERNEL@l
786	bl	transfer_to_handler_full
787	.long	nonrecoverable_exception
788	.long	ret_from_except
789#endif
790
791	.globl	ret_from_except_full
792ret_from_except_full:
793	REST_NVGPRS(r1)
794	/* fall through */
795
796	.globl	ret_from_except
797ret_from_except:
798	/* Hard-disable interrupts so that current_thread_info()->flags
799	 * can't change between when we test it and when we return
800	 * from the interrupt. */
801	/* Note: We don't bother telling lockdep about it */
802	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
803	SYNC			/* Some chip revs have problems here... */
804	MTMSRD(r10)		/* disable interrupts */
805
806	lwz	r3,_MSR(r1)	/* Returning to user mode? */
807	andi.	r0,r3,MSR_PR
808	beq	resume_kernel
809
810user_exc_return:		/* r10 contains MSR_KERNEL here */
811	/* Check current_thread_info()->flags */
812	CURRENT_THREAD_INFO(r9, r1)
813	lwz	r9,TI_FLAGS(r9)
814	andi.	r0,r9,_TIF_USER_WORK_MASK
815	bne	do_work
816
817restore_user:
818#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
819	/* Check whether this process has its own DBCR0 value.  The internal
820	   debug mode bit tells us that dbcr0 should be loaded. */
821	lwz	r0,THREAD+THREAD_DBCR0(r2)
822	andis.	r10,r0,DBCR0_IDM@h
823	bnel-	load_dbcr0
824#endif
825#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
826	CURRENT_THREAD_INFO(r9, r1)
827	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
828#endif
829
830	b	restore
831
832/* N.B. the only way to get here is from the beq following ret_from_except. */
833resume_kernel:
834	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
835	CURRENT_THREAD_INFO(r9, r1)
836	lwz	r8,TI_FLAGS(r9)
837	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
838	beq+	1f
839
840	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
841
842	lwz	r3,GPR1(r1)
843	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
844	mr	r4,r1			/* src:  current exception frame */
845	mr	r1,r3			/* Reroute the trampoline frame to r1 */
846
847	/* Copy from the original to the trampoline. */
848	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
849	li	r6,0			/* start offset: 0 */
850	mtctr	r5
8512:	lwzx	r0,r6,r4
852	stwx	r0,r6,r3
853	addi	r6,r6,4
854	bdnz	2b
855
856	/* Do real store operation to complete stwu */
857	lwz	r5,GPR1(r1)
858	stw	r8,0(r5)
859
860	/* Clear _TIF_EMULATE_STACK_STORE flag */
861	lis	r11,_TIF_EMULATE_STACK_STORE@h
862	addi	r5,r9,TI_FLAGS
8630:	lwarx	r8,0,r5
864	andc	r8,r8,r11
865#ifdef CONFIG_IBM405_ERR77
866	dcbt	0,r5
867#endif
868	stwcx.	r8,0,r5
869	bne-	0b
8701:
871
872#ifdef CONFIG_PREEMPT
873	/* check current_thread_info->preempt_count */
874	lwz	r0,TI_PREEMPT(r9)
875	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
876	bne	restore
877	andi.	r8,r8,_TIF_NEED_RESCHED
878	beq+	restore
879	lwz	r3,_MSR(r1)
880	andi.	r0,r3,MSR_EE	/* interrupts off? */
881	beq	restore		/* don't schedule if so */
882#ifdef CONFIG_TRACE_IRQFLAGS
883	/* Lockdep thinks irqs are enabled, but we need to call
884	 * preempt_schedule_irq with IRQs off, so we inform lockdep
885	 * now that we -did- turn them off already
886	 */
887	bl	trace_hardirqs_off
888#endif
8891:	bl	preempt_schedule_irq
890	CURRENT_THREAD_INFO(r9, r1)
891	lwz	r3,TI_FLAGS(r9)
892	andi.	r0,r3,_TIF_NEED_RESCHED
893	bne-	1b
894#ifdef CONFIG_TRACE_IRQFLAGS
895	/* And now, to properly rebalance the above, we tell lockdep they
896	 * are being turned back on, which will happen when we return
897	 */
898	bl	trace_hardirqs_on
899#endif
900#endif /* CONFIG_PREEMPT */
901
902	/* interrupts are hard-disabled at this point */
903restore:
904#ifdef CONFIG_44x
905BEGIN_MMU_FTR_SECTION
906	b	1f
907END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
908	lis	r4,icache_44x_need_flush@ha
909	lwz	r5,icache_44x_need_flush@l(r4)
910	cmplwi	cr0,r5,0
911	beq+	1f
912	li	r6,0
913	iccci	r0,r0
914	stw	r6,icache_44x_need_flush@l(r4)
9151:
916#endif  /* CONFIG_44x */
917
918	lwz	r9,_MSR(r1)
919#ifdef CONFIG_TRACE_IRQFLAGS
920	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
921	 * off in this assembly code while peeking at TI_FLAGS() and such. However,
922	 * we need to inform it if the exception turned interrupts off, and we
923	 * are about to turn them back on.
924	 *
925	 * The problem here, sadly, is that we don't know whether the exception was
926	 * one that turned interrupts off or not. So we always tell lockdep about
927	 * turning them on here when we go back to wherever we came from with EE
928	 * on, even if that may mean some redundant calls being tracked. Maybe later
929	 * we could encode what the exception did somewhere, or test the exception
930	 * type in pt_regs, but that sounds like overkill.
931	 */
932	andi.	r10,r9,MSR_EE
933	beq	1f
934	/*
935	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
936	 * which is the stack frame here, we need to force a stack frame
937	 * in case we came from user space.
938	 */
939	stwu	r1,-32(r1)
940	mflr	r0
941	stw	r0,4(r1)
942	stwu	r1,-32(r1)
943	bl	trace_hardirqs_on
944	lwz	r1,0(r1)
945	lwz	r1,0(r1)
946	lwz	r9,_MSR(r1)
9471:
948#endif /* CONFIG_TRACE_IRQFLAGS */
949
950	lwz	r0,GPR0(r1)
951	lwz	r2,GPR2(r1)
952	REST_4GPRS(3, r1)
953	REST_2GPRS(7, r1)
954
955	lwz	r10,_XER(r1)
956	lwz	r11,_CTR(r1)
957	mtspr	SPRN_XER,r10
958	mtctr	r11
959
960	PPC405_ERR77(0,r1)
961BEGIN_FTR_SECTION
962	lwarx	r11,0,r1
963END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
964	stwcx.	r0,0,r1			/* to clear the reservation */
965
966#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
967	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
968	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
969
970	lwz	r10,_CCR(r1)
971	lwz	r11,_LINK(r1)
972	mtcrf	0xFF,r10
973	mtlr	r11
974
975	/*
976	 * Once we put values in SRR0 and SRR1, we are in a state
977	 * where exceptions are not recoverable, since taking an
978	 * exception will trash SRR0 and SRR1.  Therefore we clear the
979	 * MSR:RI bit to indicate this.  If we do take an exception,
980	 * we can't return to the point of the exception but we
981	 * can restart the exception exit path at the label
982	 * exc_exit_restart below.  -- paulus
983	 */
984	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
985	SYNC
986	MTMSRD(r10)		/* clear the RI bit */
987	.globl exc_exit_restart
988exc_exit_restart:
989	lwz	r12,_NIP(r1)
990#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
991	mtspr	SPRN_NRI, r0
992#endif
993	mtspr	SPRN_SRR0,r12
994	mtspr	SPRN_SRR1,r9
995	REST_4GPRS(9, r1)
996	lwz	r1,GPR1(r1)
997	.globl exc_exit_restart_end
998exc_exit_restart_end:
999	SYNC
1000	RFI
1001
1002#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1003	/*
1004	 * This is a bit different on 4xx/Book-E because it doesn't have
1005	 * the RI bit in the MSR.
1006	 * The TLB miss handler checks if we have interrupted
1007	 * the exception exit path and restarts it if so
1008	 * (well maybe one day it will... :).
1009	 */
1010	lwz	r11,_LINK(r1)
1011	mtlr	r11
1012	lwz	r10,_CCR(r1)
1013	mtcrf	0xff,r10
1014	REST_2GPRS(9, r1)
1015	.globl exc_exit_restart
1016exc_exit_restart:
1017	lwz	r11,_NIP(r1)
1018	lwz	r12,_MSR(r1)
1019exc_exit_start:
1020	mtspr	SPRN_SRR0,r11
1021	mtspr	SPRN_SRR1,r12
1022	REST_2GPRS(11, r1)
1023	lwz	r1,GPR1(r1)
1024	.globl exc_exit_restart_end
1025exc_exit_restart_end:
1026	PPC405_ERR77_SYNC
1027	rfi
1028	b	.			/* prevent prefetch past rfi */
1029
1030/*
1031 * Returning from a critical interrupt in user mode doesn't need
1032 * to be any different from a normal exception.  For a critical
1033 * interrupt in the kernel, we just return (without checking for
1034 * preemption) since the interrupt may have happened at some crucial
1035 * place (e.g. inside the TLB miss handler), and because we will be
1036 * running with r1 pointing into critical_stack, not the current
1037 * process's kernel stack (and therefore current_thread_info() will
1038 * give the wrong answer).
1039 * We have to restore various SPRs that may have been in use at the
1040 * time of the critical interrupt.
1041 *
1042 */
1043#ifdef CONFIG_40x
1044#define PPC_40x_TURN_OFF_MSR_DR						    \
1045	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1046	 * assume the instructions here are mapped by a pinned TLB entry */ \
1047	li	r10,MSR_IR;						    \
1048	mtmsr	r10;							    \
1049	isync;								    \
1050	tophys(r1, r1);
1051#else
1052#define PPC_40x_TURN_OFF_MSR_DR
1053#endif
1054
1055#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1056	REST_NVGPRS(r1);						\
1057	lwz	r3,_MSR(r1);						\
1058	andi.	r3,r3,MSR_PR;						\
1059	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1060	bne	user_exc_return;					\
1061	lwz	r0,GPR0(r1);						\
1062	lwz	r2,GPR2(r1);						\
1063	REST_4GPRS(3, r1);						\
1064	REST_2GPRS(7, r1);						\
1065	lwz	r10,_XER(r1);						\
1066	lwz	r11,_CTR(r1);						\
1067	mtspr	SPRN_XER,r10;						\
1068	mtctr	r11;							\
1069	PPC405_ERR77(0,r1);						\
1070	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1071	lwz	r11,_LINK(r1);						\
1072	mtlr	r11;							\
1073	lwz	r10,_CCR(r1);						\
1074	mtcrf	0xff,r10;						\
1075	PPC_40x_TURN_OFF_MSR_DR;					\
1076	lwz	r9,_DEAR(r1);						\
1077	lwz	r10,_ESR(r1);						\
1078	mtspr	SPRN_DEAR,r9;						\
1079	mtspr	SPRN_ESR,r10;						\
1080	lwz	r11,_NIP(r1);						\
1081	lwz	r12,_MSR(r1);						\
1082	mtspr	exc_lvl_srr0,r11;					\
1083	mtspr	exc_lvl_srr1,r12;					\
1084	lwz	r9,GPR9(r1);						\
1085	lwz	r12,GPR12(r1);						\
1086	lwz	r10,GPR10(r1);						\
1087	lwz	r11,GPR11(r1);						\
1088	lwz	r1,GPR1(r1);						\
1089	PPC405_ERR77_SYNC;						\
1090	exc_lvl_rfi;							\
1091	b	.;		/* prevent prefetch past exc_lvl_rfi */
1092
1093#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1094	lwz	r9,_##exc_lvl_srr0(r1);					\
1095	lwz	r10,_##exc_lvl_srr1(r1);				\
1096	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1097	mtspr	SPRN_##exc_lvl_srr1,r10;
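/* The ## token pasting turns e.g. RESTORE_xSRR(CSRR0,CSRR1) into loads of
 * _CSRR0/_CSRR1 from the exception frame and stores to SPRN_CSRR0/SPRN_CSRR1. */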
1098
1099#if defined(CONFIG_PPC_BOOK3E_MMU)
1100#ifdef CONFIG_PHYS_64BIT
1101#define	RESTORE_MAS7							\
1102	lwz	r11,MAS7(r1);						\
1103	mtspr	SPRN_MAS7,r11;
1104#else
1105#define	RESTORE_MAS7
1106#endif /* CONFIG_PHYS_64BIT */
1107#define RESTORE_MMU_REGS						\
1108	lwz	r9,MAS0(r1);						\
1109	lwz	r10,MAS1(r1);						\
1110	lwz	r11,MAS2(r1);						\
1111	mtspr	SPRN_MAS0,r9;						\
1112	lwz	r9,MAS3(r1);						\
1113	mtspr	SPRN_MAS1,r10;						\
1114	lwz	r10,MAS6(r1);						\
1115	mtspr	SPRN_MAS2,r11;						\
1116	mtspr	SPRN_MAS3,r9;						\
1117	mtspr	SPRN_MAS6,r10;						\
1118	RESTORE_MAS7;
1119#elif defined(CONFIG_44x)
1120#define RESTORE_MMU_REGS						\
1121	lwz	r9,MMUCR(r1);						\
1122	mtspr	SPRN_MMUCR,r9;
1123#else
1124#define RESTORE_MMU_REGS
1125#endif
1126
1127#ifdef CONFIG_40x
1128	.globl	ret_from_crit_exc
1129ret_from_crit_exc:
1130	mfspr	r9,SPRN_SPRG_THREAD
1131	lis	r10,saved_ksp_limit@ha;
1132	lwz	r10,saved_ksp_limit@l(r10);
1133	tovirt(r9,r9);
1134	stw	r10,KSP_LIMIT(r9)
1135	lis	r9,crit_srr0@ha;
1136	lwz	r9,crit_srr0@l(r9);
1137	lis	r10,crit_srr1@ha;
1138	lwz	r10,crit_srr1@l(r10);
1139	mtspr	SPRN_SRR0,r9;
1140	mtspr	SPRN_SRR1,r10;
1141	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1142#endif /* CONFIG_40x */
1143
1144#ifdef CONFIG_BOOKE
1145	.globl	ret_from_crit_exc
1146ret_from_crit_exc:
1147	mfspr	r9,SPRN_SPRG_THREAD
1148	lwz	r10,SAVED_KSP_LIMIT(r1)
1149	stw	r10,KSP_LIMIT(r9)
1150	RESTORE_xSRR(SRR0,SRR1);
1151	RESTORE_MMU_REGS;
1152	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1153
1154	.globl	ret_from_debug_exc
1155ret_from_debug_exc:
1156	mfspr	r9,SPRN_SPRG_THREAD
1157	lwz	r10,SAVED_KSP_LIMIT(r1)
1158	stw	r10,KSP_LIMIT(r9)
1159	lwz	r9,THREAD_INFO-THREAD(r9)
1160	CURRENT_THREAD_INFO(r10, r1)
1161	lwz	r10,TI_PREEMPT(r10)
1162	stw	r10,TI_PREEMPT(r9)
1163	RESTORE_xSRR(SRR0,SRR1);
1164	RESTORE_xSRR(CSRR0,CSRR1);
1165	RESTORE_MMU_REGS;
1166	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1167
1168	.globl	ret_from_mcheck_exc
1169ret_from_mcheck_exc:
1170	mfspr	r9,SPRN_SPRG_THREAD
1171	lwz	r10,SAVED_KSP_LIMIT(r1)
1172	stw	r10,KSP_LIMIT(r9)
1173	RESTORE_xSRR(SRR0,SRR1);
1174	RESTORE_xSRR(CSRR0,CSRR1);
1175	RESTORE_xSRR(DSRR0,DSRR1);
1176	RESTORE_MMU_REGS;
1177	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1178#endif /* CONFIG_BOOKE */
1179
1180/*
1181 * Load the DBCR0 value for a task that is being ptraced,
1182 * having first saved the old DBCR0 value in global_dbcr0.  Note that r0
1183 * holds the dbcr0 value to set on entry to this routine.
1184 */
1185load_dbcr0:
1186	mfmsr	r10		/* first disable debug exceptions */
1187	rlwinm	r10,r10,0,~MSR_DE
1188	mtmsr	r10
1189	isync
1190	mfspr	r10,SPRN_DBCR0
1191	lis	r11,global_dbcr0@ha
1192	addi	r11,r11,global_dbcr0@l
1193#ifdef CONFIG_SMP
1194	CURRENT_THREAD_INFO(r9, r1)
1195	lwz	r9,TI_CPU(r9)
1196	slwi	r9,r9,3
1197	add	r11,r11,r9
1198#endif
1199	stw	r10,0(r11)
1200	mtspr	SPRN_DBCR0,r0
1201	lwz	r10,4(r11)
1202	addi	r10,r10,1
1203	stw	r10,4(r11)
1204	li	r11,-1
1205	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1206	blr
1207
1208	.section .bss
1209	.align	4
1210global_dbcr0:
1211	.space	8*NR_CPUS
1212	.previous
1213#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1214
1215do_work:			/* r10 contains MSR_KERNEL here */
1216	andi.	r0,r9,_TIF_NEED_RESCHED
1217	beq	do_user_signal
1218
1219do_resched:			/* r10 contains MSR_KERNEL here */
1220	/* Note: We don't need to inform lockdep that we are enabling
1221	 * interrupts here. As far as it knows, they are already enabled
1222	 */
1223	ori	r10,r10,MSR_EE
1224	SYNC
1225	MTMSRD(r10)		/* hard-enable interrupts */
1226	bl	schedule
1227recheck:
1228	/* Note: And we don't tell it we are disabling them again,
1229	 * either. Those disable/enable cycles used to peek at
1230	 * TI_FLAGS aren't advertised.
1231	 */
1232	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1233	SYNC
1234	MTMSRD(r10)		/* disable interrupts */
1235	CURRENT_THREAD_INFO(r9, r1)
1236	lwz	r9,TI_FLAGS(r9)
1237	andi.	r0,r9,_TIF_NEED_RESCHED
1238	bne-	do_resched
1239	andi.	r0,r9,_TIF_USER_WORK_MASK
1240	beq	restore_user
1241do_user_signal:			/* r10 contains MSR_KERNEL here */
1242	ori	r10,r10,MSR_EE
1243	SYNC
1244	MTMSRD(r10)		/* hard-enable interrupts */
1245	/* save r13-r31 in the exception frame, if not already done */
1246	lwz	r3,_TRAP(r1)
1247	andi.	r0,r3,1
1248	beq	2f
1249	SAVE_NVGPRS(r1)
1250	rlwinm	r3,r3,0,0,30
1251	stw	r3,_TRAP(r1)
12522:	addi	r3,r1,STACK_FRAME_OVERHEAD
1253	mr	r4,r9
1254	bl	do_notify_resume
1255	REST_NVGPRS(r1)
1256	b	recheck
1257
1258/*
1259 * We come here when we are at the end of handling an exception
1260 * that occurred at a place where taking an exception will lose
1261 * state information, such as the contents of SRR0 and SRR1.
1262 */
1263nonrecoverable:
1264	lis	r10,exc_exit_restart_end@ha
1265	addi	r10,r10,exc_exit_restart_end@l
1266	cmplw	r12,r10
1267	bge	3f
1268	lis	r11,exc_exit_restart@ha
1269	addi	r11,r11,exc_exit_restart@l
1270	cmplw	r12,r11
1271	blt	3f
1272	lis	r10,ee_restarts@ha
1273	lwz	r12,ee_restarts@l(r10)
1274	addi	r12,r12,1
1275	stw	r12,ee_restarts@l(r10)
1276	mr	r12,r11		/* restart at exc_exit_restart */
1277	blr
12783:	/* OK, we can't recover, kill this process */
1279	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1280BEGIN_FTR_SECTION
1281	blr
1282END_FTR_SECTION_IFSET(CPU_FTR_601)
1283	lwz	r3,_TRAP(r1)
1284	andi.	r0,r3,1
1285	beq	4f
1286	SAVE_NVGPRS(r1)
1287	rlwinm	r3,r3,0,0,30
1288	stw	r3,_TRAP(r1)
12894:	addi	r3,r1,STACK_FRAME_OVERHEAD
1290	bl	nonrecoverable_exception
1291	/* shouldn't return */
1292	b	4b
1293
1294	.section .bss
1295	.align	2
1296ee_restarts:
1297	.space	4
1298	.previous
1299
1300/*
1301 * PROM code for specific machines follows.  Put it
1302 * here so it's easy to add arch-specific sections later.
1303 * -- Cort
1304 */
1305#ifdef CONFIG_PPC_RTAS
1306/*
1307 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1308 * called with the MMU off.
1309 */
1310_GLOBAL(enter_rtas)
1311	stwu	r1,-INT_FRAME_SIZE(r1)
1312	mflr	r0
1313	stw	r0,INT_FRAME_SIZE+4(r1)
1314	LOAD_REG_ADDR(r4, rtas)
1315	lis	r6,1f@ha	/* physical return address for rtas */
1316	addi	r6,r6,1f@l
1317	tophys(r6,r6)
1318	tophys(r7,r1)
1319	lwz	r8,RTASENTRY(r4)
1320	lwz	r4,RTASBASE(r4)
1321	mfmsr	r9
1322	stw	r9,8(r1)
1323	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1324	SYNC			/* disable interrupts so SRR0/1 */
1325	MTMSRD(r0)		/* don't get trashed */
1326	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1327	mtlr	r6
1328	mtspr	SPRN_SPRG_RTAS,r7
1329	mtspr	SPRN_SRR0,r8
1330	mtspr	SPRN_SRR1,r9
1331	RFI
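/* RTAS runs with translation off (MSR_IR/MSR_DR cleared via SRR1 above) and
 * returns through LR to the physical address of the 1: label below.
 * SPRN_SPRG_RTAS holds the physical frame pointer while we are inside RTAS
 * and is cleared again on the way out, so that low-level exception code can
 * recognise this state (see machine_check_in_rtas). */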
13321:	tophys(r9,r1)
1333	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1334	lwz	r9,8(r9)	/* original msr value */
1335	addi	r1,r1,INT_FRAME_SIZE
1336	li	r0,0
1337	mtspr	SPRN_SPRG_RTAS,r0
1338	mtspr	SPRN_SRR0,r8
1339	mtspr	SPRN_SRR1,r9
1340	RFI			/* return to caller */
1341
1342	.globl	machine_check_in_rtas
1343machine_check_in_rtas:
1344	twi	31,0,0
1345	/* XXX load up BATs and panic */
1346
1347#endif /* CONFIG_PPC_RTAS */
1348