/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33#include <asm/ftrace.h>
34#include <asm/ptrace.h>
35
36#undef SHOW_SYSCALLS
37#undef SHOW_SYSCALLS_TASK
38
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
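/*
 * For example, on Book-E MSR_KERNEL includes MSR_CE and no longer fits
 * in a signed 16-bit immediate, so LOAD_MSR_KERNEL(r10, MSR_KERNEL)
 * expands to the two-instruction sequence
 *	lis	r10,MSR_KERNEL@h; ori	r10,r10,MSR_KERNEL@l
 * while on classic 6xx a single "li r10,MSR_KERNEL" suffices.
 */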

#ifdef CONFIG_BOOKE
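/*
 * On Book-E, each exception level (machine check, debug, critical)
 * has its own pair of save/restore registers.  Each entry point
 * below saves the SRR pair of the level beneath it, which is still
 * live and would be clobbered by a nested exception, and then falls
 * through, so that e.g. a machine check preserves DSRR0/1, CSRR0/1
 * and SRR0/1 before transferring to the handler.
 */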
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
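/*
 * On entry here (as set up by the exception prologs in the head_*.S
 * files): r11 points to the exception frame, r12 holds the
 * interrupted NIP, r9 the interrupted MSR, r10 the MSR value to run
 * the handler with, and LR points at a two-word table giving the
 * handler's virtual address and the address to return to when the
 * handler is done.
 */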
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/* Save handler and return address into the 2 unused words
	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
	 * else can be recovered from the pt_regs except r3 which for
	 * normal interrupts has been set to pt_regs and for syscalls
	 * is an argument, so we temporarily use ORIG_GPR3 to save it
	 */
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,ORIG_GPR3(r1)
	/*
	 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If we came from user mode there is only one stack frame on the
	 * stack, and accessing CALLER_ADDR1 will cause an oops. So we
	 * need to create a dummy stack frame to make trace_hardirqs_off
	 * happy.
	 */
	andi.	r12,r12,MSR_PR
	beq	11f
	stwu	r1,-16(r1)
	bl	trace_hardirqs_off
	addi	r1,r1,16
	b	12f

11:
	bl	trace_hardirqs_off
12:
	lwz	r0,GPR0(r1)
	lwz	r3,ORIG_GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	lwz	r9,8(r1)
	lwz	r11,12(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined(CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
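/*
 * On entry, r0 holds the syscall number and r3-r8 the arguments, as
 * placed there by user space.  The result comes back in r3; errors
 * are reported by setting the SO bit in CR0 and returning the
 * (positive) error code in r3, which the C library turns into
 * -1/errno.
 */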
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off.
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we want
	 * to catch the bugger if it does.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
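/*
 * Clear any outstanding lwarx/stwcx. reservation before returning to
 * the interrupted context.  CPUs with CPU_FTR_NEED_PAIRED_STWCX want
 * every stwcx. preceded by a matching lwarx, hence the dummy load in
 * the feature section below.
 */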
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore the argument registers, which may just have been
	 * changed.  We use the return value of do_syscall_trace_enter
	 * as the syscall number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
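/*
 * The low bit of _TRAP(r1) doubles as a flag: when set, only the
 * volatile registers were saved in the exception frame; clearing it
 * records that the nonvolatile registers (r13-r31) are now valid in
 * the frame too, so the return path knows it may restore them.
 */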
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

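/*
 * Fast return path: restore only the registers the exception prolog
 * saved (frame pointed to by r11, interrupted MSR/NIP in r9/r12) and
 * RFI straight back, bypassing the full ret_from_except path.  Used,
 * for instance, by the NAP/DOZE wakeup code above.
 */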
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/*
 * Aargh, a nonrecoverable interrupt and we don't even know which
 * trap it is: panic.  (The 601 doesn't implement the RI bit, so
 * just assume it's OK there and retry.)
 */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds like overkill
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

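/*
 * RET_FROM_EXC_LEVEL restores the full register state from the
 * exception frame and returns with the rfi variant appropriate to
 * the exception level (rfci, rfdi or rfmci), loading that level's
 * SRR pair from _NIP/_MSR in the frame.  If the interrupted context
 * was user mode, it reroutes to the normal user_exc_return path
 * instead.
 */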
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
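
/*
 * Via ## token pasting, e.g. RESTORE_xSRR(CSRR0,CSRR1) reloads the
 * _CSRR0/_CSRR1 slots of the exception frame back into
 * SPRN_CSRR0/SPRN_CSRR1.
 */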

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced, having
 * first saved away the global DBCR0.  On entry, r0 holds the DBCR0
 * value to set.
 */
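/*
 * global_dbcr0 holds 8 bytes per CPU (hence the "slwi r9,r9,3"
 * indexing): the saved DBCR0 value at offset 0 and, at offset 4,
 * what appears to be a use count, incremented here and decremented
 * when the saved value is reloaded in transfer_to_handler.
 */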
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: Nor do we tell it that we are disabling them again.
	 * The disable/enable cycles used to peek at TI_FLAGS aren't
	 * advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
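/*
 * enter_rtas therefore saves the caller's MSR, hard-disables
 * interrupts, and RFIs to the RTAS entry point with an MSR that has
 * IR/DR cleared, r6 holding the physical return address.  The code
 * at the local label 1: below then restores the saved MSR and
 * returns to the caller.
 */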
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * On PPC32, _mcount must preserve the link register, but we
	 * have r0 to play with: use it to move the return address of
	 * mcount's caller into the ctr register, restore the link
	 * register, and then jump back through ctr.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	addi	r3, r1, 52

	bl	prepare_ftrace_return
	nop

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */
1410