xref: /linux/arch/powerpc/kernel/entry_32.S (revision 2d6ffcca623a9a16df6cdfbe8250b7a5904a5f5e)
1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33#include <asm/ftrace.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x)	li r,(x)
45#endif
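/*
 * The two-instruction form is needed because lis/ori can build a full
 * 32-bit constant, whereas a single li only takes a 16-bit immediate.
 */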
46
47#ifdef CONFIG_BOOKE
48	.globl	mcheck_transfer_to_handler
49mcheck_transfer_to_handler:
50	mfspr	r0,SPRN_DSRR0
51	stw	r0,_DSRR0(r11)
52	mfspr	r0,SPRN_DSRR1
53	stw	r0,_DSRR1(r11)
54	/* fall through */
55
56	.globl	debug_transfer_to_handler
57debug_transfer_to_handler:
58	mfspr	r0,SPRN_CSRR0
59	stw	r0,_CSRR0(r11)
60	mfspr	r0,SPRN_CSRR1
61	stw	r0,_CSRR1(r11)
62	/* fall through */
63
64	.globl	crit_transfer_to_handler
65crit_transfer_to_handler:
66#ifdef CONFIG_FSL_BOOKE
67	mfspr	r0,SPRN_MAS0
68	stw	r0,MAS0(r11)
69	mfspr	r0,SPRN_MAS1
70	stw	r0,MAS1(r11)
71	mfspr	r0,SPRN_MAS2
72	stw	r0,MAS2(r11)
73	mfspr	r0,SPRN_MAS3
74	stw	r0,MAS3(r11)
75	mfspr	r0,SPRN_MAS6
76	stw	r0,MAS6(r11)
77#ifdef CONFIG_PHYS_64BIT
78	mfspr	r0,SPRN_MAS7
79	stw	r0,MAS7(r11)
80#endif /* CONFIG_PHYS_64BIT */
81#endif /* CONFIG_FSL_BOOKE */
82#ifdef CONFIG_44x
83	mfspr	r0,SPRN_MMUCR
84	stw	r0,MMUCR(r11)
85#endif
86	mfspr	r0,SPRN_SRR0
87	stw	r0,_SRR0(r11)
88	mfspr	r0,SPRN_SRR1
89	stw	r0,_SRR1(r11)
90
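	/*
	 * Save the thread's ksp_limit and rebase it onto the exception
	 * stack we are now running on (keeping the low-order offset bits),
	 * so the stack overflow check in transfer_to_handler still works;
	 * the ret_from_*_exc paths below restore the saved value.
	 */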
91	mfspr	r8,SPRN_SPRG3
92	lwz	r0,KSP_LIMIT(r8)
93	stw	r0,SAVED_KSP_LIMIT(r11)
94	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
95	stw	r0,KSP_LIMIT(r8)
96	/* fall through */
97#endif
98
99#ifdef CONFIG_40x
100	.globl	crit_transfer_to_handler
101crit_transfer_to_handler:
102	lwz	r0,crit_r10@l(0)
103	stw	r0,GPR10(r11)
104	lwz	r0,crit_r11@l(0)
105	stw	r0,GPR11(r11)
106	mfspr	r0,SPRN_SRR0
107	stw	r0,crit_srr0@l(0)
108	mfspr	r0,SPRN_SRR1
109	stw	r0,crit_srr1@l(0)
110
111	mfspr	r8,SPRN_SPRG3
112	lwz	r0,KSP_LIMIT(r8)
113	stw	r0,saved_ksp_limit@l(0)
114	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
115	stw	r0,KSP_LIMIT(r8)
116	/* fall through */
117#endif
118
119/*
120 * This code finishes saving the registers to the exception frame
121 * and jumps to the appropriate handler for the exception, turning
122 * on address translation.
123 * Note that we rely on the caller having set cr0.eq iff the exception
124 * occurred in kernel mode (i.e. MSR:PR = 0).
125 */
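/*
 * Register conventions used below: r11 points to the exception frame,
 * r9 and r12 hold the MSR and NIP of the interrupted context, r10 holds
 * the MSR value to run the handler with, and LR points to a pair of
 * words giving the handler address and the address to return to when
 * the handler is done.
 */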
126	.globl	transfer_to_handler_full
127transfer_to_handler_full:
128	SAVE_NVGPRS(r11)
129	/* fall through */
130
131	.globl	transfer_to_handler
132transfer_to_handler:
133	stw	r2,GPR2(r11)
134	stw	r12,_NIP(r11)
135	stw	r9,_MSR(r11)
136	andi.	r2,r9,MSR_PR
137	mfctr	r12
138	mfspr	r2,SPRN_XER
139	stw	r12,_CTR(r11)
140	stw	r2,_XER(r11)
141	mfspr	r12,SPRN_SPRG3
142	addi	r2,r12,-THREAD
143	tovirt(r2,r2)			/* set r2 to current */
144	beq	2f			/* if from user, fix up THREAD.regs */
145	addi	r11,r1,STACK_FRAME_OVERHEAD
146	stw	r11,PT_REGS(r12)
147#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
148	/* Check to see if the dbcr0 register is set up to debug.  Use the
149	   internal debug mode bit to do this. */
150	lwz	r12,THREAD_DBCR0(r12)
151	andis.	r12,r12,DBCR0_IDM@h
152	beq+	3f
153	/* From user and task is ptraced - load up global dbcr0 */
154	li	r12,-1			/* clear all pending debug events */
155	mtspr	SPRN_DBSR,r12
156	lis	r11,global_dbcr0@ha
157	tophys(r11,r11)
158	addi	r11,r11,global_dbcr0@l
159#ifdef CONFIG_SMP
160	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
161	lwz	r9,TI_CPU(r9)
162	slwi	r9,r9,3
163	add	r11,r11,r9
164#endif
165	lwz	r12,0(r11)
166	mtspr	SPRN_DBCR0,r12
167	lwz	r12,4(r11)
168	addi	r12,r12,-1
169	stw	r12,4(r11)
170#endif
171	b	3f
172
1732:	/* if from kernel, check interrupted DOZE/NAP mode and
174         * check for stack overflow
175         */
176	lwz	r9,KSP_LIMIT(r12)
177	cmplw	r1,r9			/* if r1 <= ksp_limit */
178	ble-	stack_ovf		/* then the kernel stack overflowed */
1795:
180#if defined(CONFIG_6xx) || defined(CONFIG_E500)
181	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
182	tophys(r9,r9)			/* check local flags */
183	lwz	r12,TI_LOCAL_FLAGS(r9)
184	mtcrf	0x01,r12
185	bt-	31-TLF_NAPPING,4f
186	bt-	31-TLF_SLEEPING,7f
187#endif /* CONFIG_6xx || CONFIG_E500 */
188	.globl transfer_to_handler_cont
189transfer_to_handler_cont:
1903:
191	mflr	r9
192	lwz	r11,0(r9)		/* virtual address of handler */
193	lwz	r9,4(r9)		/* where to go when done */
194	mtspr	SPRN_SRR0,r11
195	mtspr	SPRN_SRR1,r10
196	mtlr	r9
197	SYNC
198	RFI				/* jump to handler, enable MMU */
199
200#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2014:	rlwinm	r12,r12,0,~_TLF_NAPPING
202	stw	r12,TI_LOCAL_FLAGS(r9)
203	b	power_save_ppc32_restore
204
2057:	rlwinm	r12,r12,0,~_TLF_SLEEPING
206	stw	r12,TI_LOCAL_FLAGS(r9)
207	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
208	rlwinm	r9,r9,0,~MSR_EE
209	lwz	r12,_LINK(r11)		/* and return to address in LR */
210	b	fast_exception_return
211#endif
212
213/*
214 * On kernel stack overflow, load up an initial stack pointer
215 * and call StackOverflow(regs), which should not return.
216 */
217stack_ovf:
218	/* sometimes we use a statically-allocated stack, which is OK. */
219	lis	r12,_end@h
220	ori	r12,r12,_end@l
221	cmplw	r1,r12
222	ble	5b			/* r1 <= &_end is OK */
223	SAVE_NVGPRS(r11)
224	addi	r3,r1,STACK_FRAME_OVERHEAD
225	lis	r1,init_thread_union@ha
226	addi	r1,r1,init_thread_union@l
227	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
228	lis	r9,StackOverflow@ha
229	addi	r9,r9,StackOverflow@l
230	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
231	FIX_SRR1(r10,r12)
232	mtspr	SPRN_SRR0,r9
233	mtspr	SPRN_SRR1,r10
234	SYNC
235	RFI
236
237/*
238 * Handle a system call.
239 */
240	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
241	.stabs	"entry_32.S",N_SO,0,0,0f
2420:
243
244_GLOBAL(DoSyscall)
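	/*
	 * On entry r0 holds the syscall number and r3-r8 hold the
	 * arguments; r3 is also saved as ORIG_GPR3 so that a restarted
	 * syscall can recover its first argument.
	 */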
245	stw	r3,ORIG_GPR3(r1)
246	li	r12,0
247	stw	r12,RESULT(r1)
248	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
249	rlwinm	r11,r11,0,4,2
250	stw	r11,_CCR(r1)
251#ifdef SHOW_SYSCALLS
252	bl	do_show_syscall
253#endif /* SHOW_SYSCALLS */
254	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
255	lwz	r11,TI_FLAGS(r10)
256	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
257	bne-	syscall_dotrace
258syscall_dotrace_cont:
259	cmplwi	0,r0,NR_syscalls
260	lis	r10,sys_call_table@h
261	ori	r10,r10,sys_call_table@l
262	slwi	r0,r0,2
263	bge-	66f
264	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
265	mtlr	r10
266	addi	r9,r1,STACK_FRAME_OVERHEAD
267	PPC440EP_ERR42
268	blrl			/* Call handler */
269	.globl	ret_from_syscall
270ret_from_syscall:
271#ifdef SHOW_SYSCALLS
272	bl	do_show_syscall_exit
273#endif
274	mr	r6,r3
275	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
276	/* disable interrupts so current_thread_info()->flags can't change */
277	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
278	SYNC
279	MTMSRD(r10)
280	lwz	r9,TI_FLAGS(r12)
281	li	r8,-_LAST_ERRNO
282	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
283	bne-	syscall_exit_work
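	/*
	 * A return value in the range -_LAST_ERRNO..-1 means an error:
	 * negate it to a positive errno and set the CR0 summary-overflow
	 * (SO) bit, which is how user space distinguishes errors from
	 * large successful return values.
	 */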
284	cmplw	0,r3,r8
285	blt+	syscall_exit_cont
286	lwz	r11,_CCR(r1)			/* Load CR */
287	neg	r3,r3
288	oris	r11,r11,0x1000	/* Set SO bit in CR */
289	stw	r11,_CCR(r1)
290syscall_exit_cont:
291#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
292	/* If the process has its own DBCR0 value, load it up.  The internal
293	   debug mode bit tells us that dbcr0 should be loaded. */
294	lwz	r0,THREAD+THREAD_DBCR0(r2)
295	andis.	r10,r0,DBCR0_IDM@h
296	bnel-	load_dbcr0
297#endif
298#ifdef CONFIG_44x
299	lis	r4,icache_44x_need_flush@ha
300	lwz	r5,icache_44x_need_flush@l(r4)
301	cmplwi	cr0,r5,0
302	bne-	2f
3031:
304#endif /* CONFIG_44x */
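	/*
	 * The stwcx. below cancels any outstanding reservation, so that a
	 * stwcx. executed after we return cannot succeed against a stale
	 * reservation; CPUs with CPU_FTR_NEED_PAIRED_STWCX want it
	 * preceded by a matching lwarx.
	 */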
305BEGIN_FTR_SECTION
306	lwarx	r7,0,r1
307END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
308	stwcx.	r0,0,r1			/* to clear the reservation */
309	lwz	r4,_LINK(r1)
310	lwz	r5,_CCR(r1)
311	mtlr	r4
312	mtcr	r5
313	lwz	r7,_NIP(r1)
314	lwz	r8,_MSR(r1)
315	FIX_SRR1(r8, r0)
316	lwz	r2,GPR2(r1)
317	lwz	r1,GPR1(r1)
318	mtspr	SPRN_SRR0,r7
319	mtspr	SPRN_SRR1,r8
320	SYNC
321	RFI
322#ifdef CONFIG_44x
3232:	li	r7,0
324	iccci	r0,r0
325	stw	r7,icache_44x_need_flush@l(r4)
326	b	1b
327#endif  /* CONFIG_44x */
328
32966:	li	r3,-ENOSYS
330	b	ret_from_syscall
331
332	.globl	ret_from_fork
333ret_from_fork:
334	REST_NVGPRS(r1)
335	bl	schedule_tail
336	li	r3,0
337	b	ret_from_syscall
338
339/* Traced system call support */
340syscall_dotrace:
341	SAVE_NVGPRS(r1)
342	li	r0,0xc00
343	stw	r0,_TRAP(r1)
344	addi	r3,r1,STACK_FRAME_OVERHEAD
345	bl	do_syscall_trace_enter
346	lwz	r0,GPR0(r1)	/* Restore original registers */
347	lwz	r3,GPR3(r1)
348	lwz	r4,GPR4(r1)
349	lwz	r5,GPR5(r1)
350	lwz	r6,GPR6(r1)
351	lwz	r7,GPR7(r1)
352	lwz	r8,GPR8(r1)
353	REST_NVGPRS(r1)
354	b	syscall_dotrace_cont
355
356syscall_exit_work:
357	andi.	r0,r9,_TIF_RESTOREALL
358	beq+	0f
359	REST_NVGPRS(r1)
360	b	2f
3610:	cmplw	0,r3,r8
362	blt+	1f
363	andi.	r0,r9,_TIF_NOERROR
364	bne-	1f
365	lwz	r11,_CCR(r1)			/* Load CR */
366	neg	r3,r3
367	oris	r11,r11,0x1000	/* Set SO bit in CR */
368	stw	r11,_CCR(r1)
369
3701:	stw	r6,RESULT(r1)	/* Save result */
371	stw	r3,GPR3(r1)	/* Update return value */
3722:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
373	beq	4f
374
375	/* Clear per-syscall TIF flags if any are set.  */
376
377	li	r11,_TIF_PERSYSCALL_MASK
378	addi	r12,r12,TI_FLAGS
3793:	lwarx	r8,0,r12
380	andc	r8,r8,r11
381#ifdef CONFIG_IBM405_ERR77
382	dcbt	0,r12
383#endif
384	stwcx.	r8,0,r12
385	bne-	3b
386	subi	r12,r12,TI_FLAGS
387
3884:	/* Anything which requires enabling interrupts? */
389	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
390	beq	ret_from_except
391
392	/* Re-enable interrupts */
393	ori	r10,r10,MSR_EE
394	SYNC
395	MTMSRD(r10)
396
397	/* Save NVGPRS if they're not saved already */
398	lwz	r4,_TRAP(r1)
399	andi.	r4,r4,1
400	beq	5f
401	SAVE_NVGPRS(r1)
402	li	r4,0xc00
403	stw	r4,_TRAP(r1)
4045:
405	addi	r3,r1,STACK_FRAME_OVERHEAD
406	bl	do_syscall_trace_leave
407	b	ret_from_except_full
408
409#ifdef SHOW_SYSCALLS
410do_show_syscall:
411#ifdef SHOW_SYSCALLS_TASK
412	lis	r11,show_syscalls_task@ha
413	lwz	r11,show_syscalls_task@l(r11)
414	cmp	0,r2,r11
415	bnelr
416#endif
417	stw	r31,GPR31(r1)
418	mflr	r31
419	lis	r3,7f@ha
420	addi	r3,r3,7f@l
421	lwz	r4,GPR0(r1)
422	lwz	r5,GPR3(r1)
423	lwz	r6,GPR4(r1)
424	lwz	r7,GPR5(r1)
425	lwz	r8,GPR6(r1)
426	lwz	r9,GPR7(r1)
427	bl	printk
428	lis	r3,77f@ha
429	addi	r3,r3,77f@l
430	lwz	r4,GPR8(r1)
431	mr	r5,r2
432	bl	printk
433	lwz	r0,GPR0(r1)
434	lwz	r3,GPR3(r1)
435	lwz	r4,GPR4(r1)
436	lwz	r5,GPR5(r1)
437	lwz	r6,GPR6(r1)
438	lwz	r7,GPR7(r1)
439	lwz	r8,GPR8(r1)
440	mtlr	r31
441	lwz	r31,GPR31(r1)
442	blr
443
444do_show_syscall_exit:
445#ifdef SHOW_SYSCALLS_TASK
446	lis	r11,show_syscalls_task@ha
447	lwz	r11,show_syscalls_task@l(r11)
448	cmp	0,r2,r11
449	bnelr
450#endif
451	stw	r31,GPR31(r1)
452	mflr	r31
453	stw	r3,RESULT(r1)	/* Save result */
454	mr	r4,r3
455	lis	r3,79f@ha
456	addi	r3,r3,79f@l
457	bl	printk
458	lwz	r3,RESULT(r1)
459	mtlr	r31
460	lwz	r31,GPR31(r1)
461	blr
462
4637:	.string	"syscall %d(%x, %x, %x, %x, %x, "
46477:	.string	"%x), current=%p\n"
46579:	.string	" -> %x\n"
466	.align	2,0
467
468#ifdef SHOW_SYSCALLS_TASK
469	.data
470	.globl	show_syscalls_task
471show_syscalls_task:
472	.long	-1
473	.text
474#endif
475#endif /* SHOW_SYSCALLS */
476
477/*
478 * The fork/clone functions need to copy the full register set into
479 * the child process. Therefore we need to save all the nonvolatile
480 * registers (r13 - r31) before calling the C code.
481 */
482	.globl	ppc_fork
483ppc_fork:
484	SAVE_NVGPRS(r1)
485	lwz	r0,_TRAP(r1)
486	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
487	stw	r0,_TRAP(r1)		/* register set saved */
488	b	sys_fork
489
490	.globl	ppc_vfork
491ppc_vfork:
492	SAVE_NVGPRS(r1)
493	lwz	r0,_TRAP(r1)
494	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
495	stw	r0,_TRAP(r1)		/* register set saved */
496	b	sys_vfork
497
498	.globl	ppc_clone
499ppc_clone:
500	SAVE_NVGPRS(r1)
501	lwz	r0,_TRAP(r1)
502	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
503	stw	r0,_TRAP(r1)		/* register set saved */
504	b	sys_clone
505
506	.globl	ppc_swapcontext
507ppc_swapcontext:
508	SAVE_NVGPRS(r1)
509	lwz	r0,_TRAP(r1)
510	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
511	stw	r0,_TRAP(r1)		/* register set saved */
512	b	sys_swapcontext
513
514/*
515 * Top-level page fault handling.
516 * This is in assembler because if do_page_fault tells us that
517 * it is a bad kernel page fault, we want to save the non-volatile
518 * registers before calling bad_page_fault.
519 */
520	.globl	handle_page_fault
521handle_page_fault:
522	stw	r4,_DAR(r1)
523	addi	r3,r1,STACK_FRAME_OVERHEAD
524	bl	do_page_fault
525	cmpwi	r3,0
526	beq+	ret_from_except
527	SAVE_NVGPRS(r1)
528	lwz	r0,_TRAP(r1)
529	clrrwi	r0,r0,1
530	stw	r0,_TRAP(r1)
531	mr	r5,r3
532	addi	r3,r1,STACK_FRAME_OVERHEAD
533	lwz	r4,_DAR(r1)
534	bl	bad_page_fault
535	b	ret_from_except_full
536
537/*
538 * This routine switches between two different tasks.  The process
539 * state of one is saved on its kernel stack.  Then the state
540 * of the other is restored from its kernel stack.  The memory
541 * management hardware is updated to the second process's state.
542 * Finally, we can return to the second process.
543 * On entry, r3 points to the THREAD for the current task, r4
544 * points to the THREAD for the new task.
545 *
546 * This routine is always called with interrupts disabled.
547 *
548 * Note: there are two ways to get to the "going out" portion
549 * of this code; either by coming in via the entry (_switch)
550 * or via "fork" which must set up an environment equivalent
551 * to the "_switch" path.  If you change this, you'll have to
552 * change the fork code also.
553 *
554 * The code which creates the new task context is in 'copy_thread'
555 * in arch/powerpc/kernel/process.c
556 */
557_GLOBAL(_switch)
558	stwu	r1,-INT_FRAME_SIZE(r1)
559	mflr	r0
560	stw	r0,INT_FRAME_SIZE+4(r1)
561	/* r3-r12 are caller saved -- Cort */
562	SAVE_NVGPRS(r1)
563	stw	r0,_NIP(r1)	/* Return to switch caller */
564	mfmsr	r11
565	li	r0,MSR_FP	/* Disable floating-point */
566#ifdef CONFIG_ALTIVEC
567BEGIN_FTR_SECTION
568	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
569	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
570	stw	r12,THREAD+THREAD_VRSAVE(r2)
571END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
572#endif /* CONFIG_ALTIVEC */
573#ifdef CONFIG_SPE
574BEGIN_FTR_SECTION
575	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
576	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
577	stw	r12,THREAD+THREAD_SPEFSCR(r2)
578END_FTR_SECTION_IFSET(CPU_FTR_SPE)
579#endif /* CONFIG_SPE */
580	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
581	beq+	1f
582	andc	r11,r11,r0
583	MTMSRD(r11)
584	isync
5851:	stw	r11,_MSR(r1)
586	mfcr	r10
587	stw	r10,_CCR(r1)
588	stw	r1,KSP(r3)	/* Set old stack pointer */
589
590#ifdef CONFIG_SMP
591	/* We need a sync somewhere here to make sure that if the
592	 * previous task gets rescheduled on another CPU, it sees all
593	 * stores it has performed on this one.
594	 */
595	sync
596#endif /* CONFIG_SMP */
597
598	tophys(r0,r4)
599	CLR_TOP32(r0)
600	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
601	lwz	r1,KSP(r4)	/* Load new stack pointer */
602
603	/* save the old current 'last' for return value */
604	mr	r3,r2
605	addi	r2,r4,-THREAD	/* Update current */
606
607#ifdef CONFIG_ALTIVEC
608BEGIN_FTR_SECTION
609	lwz	r0,THREAD+THREAD_VRSAVE(r2)
610	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
611END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
612#endif /* CONFIG_ALTIVEC */
613#ifdef CONFIG_SPE
614BEGIN_FTR_SECTION
615	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
616	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
617END_FTR_SECTION_IFSET(CPU_FTR_SPE)
618#endif /* CONFIG_SPE */
619
620	lwz	r0,_CCR(r1)
621	mtcrf	0xFF,r0
622	/* r3-r12 are destroyed -- Cort */
623	REST_NVGPRS(r1)
624
625	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
626	mtlr	r4
627	addi	r1,r1,INT_FRAME_SIZE
628	blr
629
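/*
 * Quick exit path: restore the registers saved by the exception prolog
 * from the frame at r11 and return to the NIP/MSR held in r12/r9.
 */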
630	.globl	fast_exception_return
631fast_exception_return:
632#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
633	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
634	beq	1f			/* if not, we've got problems */
635#endif
636
6372:	REST_4GPRS(3, r11)
638	lwz	r10,_CCR(r11)
639	REST_GPR(1, r11)
640	mtcr	r10
641	lwz	r10,_LINK(r11)
642	mtlr	r10
643	REST_GPR(10, r11)
644	mtspr	SPRN_SRR1,r9
645	mtspr	SPRN_SRR0,r12
646	REST_GPR(9, r11)
647	REST_GPR(12, r11)
648	lwz	r11,GPR11(r11)
649	SYNC
650	RFI
651
652#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
653/* check if the exception happened in a restartable section */
6541:	lis	r3,exc_exit_restart_end@ha
655	addi	r3,r3,exc_exit_restart_end@l
656	cmplw	r12,r3
657	bge	3f
658	lis	r4,exc_exit_restart@ha
659	addi	r4,r4,exc_exit_restart@l
660	cmplw	r12,r4
661	blt	3f
662	lis	r3,fee_restarts@ha
663	tophys(r3,r3)
664	lwz	r5,fee_restarts@l(r3)
665	addi	r5,r5,1
666	stw	r5,fee_restarts@l(r3)
667	mr	r12,r4		/* restart at exc_exit_restart */
668	b	2b
669
670	.section .bss
671	.align	2
672fee_restarts:
673	.space	4
674	.previous
675
676/* aargh, a nonrecoverable interrupt, panic */
677/* aargh, we don't know which trap this is */
678/* but the 601 doesn't implement the RI bit, so assume it's OK */
6793:
680BEGIN_FTR_SECTION
681	b	2b
682END_FTR_SECTION_IFSET(CPU_FTR_601)
683	li	r10,-1
684	stw	r10,_TRAP(r11)
685	addi	r3,r1,STACK_FRAME_OVERHEAD
686	lis	r10,MSR_KERNEL@h
687	ori	r10,r10,MSR_KERNEL@l
688	bl	transfer_to_handler_full
689	.long	nonrecoverable_exception
690	.long	ret_from_except
691#endif
692
693	.globl	ret_from_except_full
694ret_from_except_full:
695	REST_NVGPRS(r1)
696	/* fall through */
697
698	.globl	ret_from_except
699ret_from_except:
700	/* Hard-disable interrupts so that current_thread_info()->flags
701	 * can't change between when we test it and when we return
702	 * from the interrupt. */
703	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
704	SYNC			/* Some chip revs have problems here... */
705	MTMSRD(r10)		/* disable interrupts */
706
707	lwz	r3,_MSR(r1)	/* Returning to user mode? */
708	andi.	r0,r3,MSR_PR
709	beq	resume_kernel
710
711user_exc_return:		/* r10 contains MSR_KERNEL here */
712	/* Check current_thread_info()->flags */
713	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
714	lwz	r9,TI_FLAGS(r9)
715	andi.	r0,r9,_TIF_USER_WORK_MASK
716	bne	do_work
717
718restore_user:
719#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
720	/* Check whether this process has its own DBCR0 value.  The internal
721	   debug mode bit tells us that dbcr0 should be loaded. */
722	lwz	r0,THREAD+THREAD_DBCR0(r2)
723	andis.	r10,r0,DBCR0_IDM@h
724	bnel-	load_dbcr0
725#endif
726
727#ifdef CONFIG_PREEMPT
728	b	restore
729
730/* N.B. the only way to get here is from the beq following ret_from_except. */
731resume_kernel:
732	/* check current_thread_info->preempt_count */
733	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
734	lwz	r0,TI_PREEMPT(r9)
735	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
736	bne	restore
737	lwz	r0,TI_FLAGS(r9)
738	andi.	r0,r0,_TIF_NEED_RESCHED
739	beq+	restore
740	andi.	r0,r3,MSR_EE	/* interrupts off? */
741	beq	restore		/* don't schedule if so */
7421:	bl	preempt_schedule_irq
743	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
744	lwz	r3,TI_FLAGS(r9)
745	andi.	r0,r3,_TIF_NEED_RESCHED
746	bne-	1b
747#else
748resume_kernel:
749#endif /* CONFIG_PREEMPT */
750
751	/* interrupts are hard-disabled at this point */
752restore:
753#ifdef CONFIG_44x
754	lis	r4,icache_44x_need_flush@ha
755	lwz	r5,icache_44x_need_flush@l(r4)
756	cmplwi	cr0,r5,0
757	beq+	1f
758	li	r6,0
759	iccci	r0,r0
760	stw	r6,icache_44x_need_flush@l(r4)
7611:
762#endif  /* CONFIG_44x */
763	lwz	r0,GPR0(r1)
764	lwz	r2,GPR2(r1)
765	REST_4GPRS(3, r1)
766	REST_2GPRS(7, r1)
767
768	lwz	r10,_XER(r1)
769	lwz	r11,_CTR(r1)
770	mtspr	SPRN_XER,r10
771	mtctr	r11
772
773	PPC405_ERR77(0,r1)
774BEGIN_FTR_SECTION
775	lwarx	r11,0,r1
776END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
777	stwcx.	r0,0,r1			/* to clear the reservation */
778
779#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
780	lwz	r9,_MSR(r1)
781	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
782	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
783
784	lwz	r10,_CCR(r1)
785	lwz	r11,_LINK(r1)
786	mtcrf	0xFF,r10
787	mtlr	r11
788
789	/*
790	 * Once we put values in SRR0 and SRR1, we are in a state
791	 * where exceptions are not recoverable, since taking an
792	 * exception will trash SRR0 and SRR1.  Therefore we clear the
793	 * MSR:RI bit to indicate this.  If we do take an exception,
794	 * we can't return to the point of the exception but we
795	 * can restart the exception exit path at the label
796	 * exc_exit_restart below.  -- paulus
797	 */
798	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
799	SYNC
800	MTMSRD(r10)		/* clear the RI bit */
801	.globl exc_exit_restart
802exc_exit_restart:
803	lwz	r9,_MSR(r1)
804	lwz	r12,_NIP(r1)
805	FIX_SRR1(r9,r10)
806	mtspr	SPRN_SRR0,r12
807	mtspr	SPRN_SRR1,r9
808	REST_4GPRS(9, r1)
809	lwz	r1,GPR1(r1)
810	.globl exc_exit_restart_end
811exc_exit_restart_end:
812	SYNC
813	RFI
814
815#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
816	/*
817	 * This is a bit different on 4xx/Book-E because it doesn't have
818	 * the RI bit in the MSR.
819	 * The TLB miss handler checks if we have interrupted
820	 * the exception exit path and restarts it if so
821	 * (well maybe one day it will... :).
822	 */
823	lwz	r11,_LINK(r1)
824	mtlr	r11
825	lwz	r10,_CCR(r1)
826	mtcrf	0xff,r10
827	REST_2GPRS(9, r1)
828	.globl exc_exit_restart
829exc_exit_restart:
830	lwz	r11,_NIP(r1)
831	lwz	r12,_MSR(r1)
832exc_exit_start:
833	mtspr	SPRN_SRR0,r11
834	mtspr	SPRN_SRR1,r12
835	REST_2GPRS(11, r1)
836	lwz	r1,GPR1(r1)
837	.globl exc_exit_restart_end
838exc_exit_restart_end:
839	PPC405_ERR77_SYNC
840	rfi
841	b	.			/* prevent prefetch past rfi */
842
843/*
844 * Returning from a critical interrupt in user mode doesn't need
845 * to be any different from a normal exception.  For a critical
846 * interrupt in the kernel, we just return (without checking for
847 * preemption) since the interrupt may have happened at some crucial
848 * place (e.g. inside the TLB miss handler), and because we will be
849 * running with r1 pointing into critical_stack, not the current
850 * process's kernel stack (and therefore current_thread_info() will
851 * give the wrong answer).
852 * We have to restore various SPRs that may have been in use at the
853 * time of the critical interrupt.
854 *
855 */
856#ifdef CONFIG_40x
857#define PPC_40x_TURN_OFF_MSR_DR						    \
858	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
859	 * assume the instructions here are mapped by a pinned TLB entry */ \
860	li	r10,MSR_IR;						    \
861	mtmsr	r10;							    \
862	isync;								    \
863	tophys(r1, r1);
864#else
865#define PPC_40x_TURN_OFF_MSR_DR
866#endif
867
868#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
869	REST_NVGPRS(r1);						\
870	lwz	r3,_MSR(r1);						\
871	andi.	r3,r3,MSR_PR;						\
872	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
873	bne	user_exc_return;					\
874	lwz	r0,GPR0(r1);						\
875	lwz	r2,GPR2(r1);						\
876	REST_4GPRS(3, r1);						\
877	REST_2GPRS(7, r1);						\
878	lwz	r10,_XER(r1);						\
879	lwz	r11,_CTR(r1);						\
880	mtspr	SPRN_XER,r10;						\
881	mtctr	r11;							\
882	PPC405_ERR77(0,r1);						\
883	stwcx.	r0,0,r1;		/* to clear the reservation */	\
884	lwz	r11,_LINK(r1);						\
885	mtlr	r11;							\
886	lwz	r10,_CCR(r1);						\
887	mtcrf	0xff,r10;						\
888	PPC_40x_TURN_OFF_MSR_DR;					\
889	lwz	r9,_DEAR(r1);						\
890	lwz	r10,_ESR(r1);						\
891	mtspr	SPRN_DEAR,r9;						\
892	mtspr	SPRN_ESR,r10;						\
893	lwz	r11,_NIP(r1);						\
894	lwz	r12,_MSR(r1);						\
895	mtspr	exc_lvl_srr0,r11;					\
896	mtspr	exc_lvl_srr1,r12;					\
897	lwz	r9,GPR9(r1);						\
898	lwz	r12,GPR12(r1);						\
899	lwz	r10,GPR10(r1);						\
900	lwz	r11,GPR11(r1);						\
901	lwz	r1,GPR1(r1);						\
902	PPC405_ERR77_SYNC;						\
903	exc_lvl_rfi;							\
904	b	.;		/* prevent prefetch past exc_lvl_rfi */
905
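/*
 * A critical/debug/machine-check exception may arrive while SRR0/SRR1
 * (and, for the higher levels, CSRR0/CSRR1 or DSRR0/DSRR1) still hold
 * live state for a lower-level exception, so the entry code saved them
 * and they are put back here before returning.
 */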
906#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
907	lwz	r9,_##exc_lvl_srr0(r1);					\
908	lwz	r10,_##exc_lvl_srr1(r1);				\
909	mtspr	SPRN_##exc_lvl_srr0,r9;					\
910	mtspr	SPRN_##exc_lvl_srr1,r10;
911
912#if defined(CONFIG_FSL_BOOKE)
913#ifdef CONFIG_PHYS_64BIT
914#define	RESTORE_MAS7							\
915	lwz	r11,MAS7(r1);						\
916	mtspr	SPRN_MAS7,r11;
917#else
918#define	RESTORE_MAS7
919#endif /* CONFIG_PHYS_64BIT */
920#define RESTORE_MMU_REGS						\
921	lwz	r9,MAS0(r1);						\
922	lwz	r10,MAS1(r1);						\
923	lwz	r11,MAS2(r1);						\
924	mtspr	SPRN_MAS0,r9;						\
925	lwz	r9,MAS3(r1);						\
926	mtspr	SPRN_MAS1,r10;						\
927	lwz	r10,MAS6(r1);						\
928	mtspr	SPRN_MAS2,r11;						\
929	mtspr	SPRN_MAS3,r9;						\
930	mtspr	SPRN_MAS6,r10;						\
931	RESTORE_MAS7;
932#elif defined(CONFIG_44x)
933#define RESTORE_MMU_REGS						\
934	lwz	r9,MMUCR(r1);						\
935	mtspr	SPRN_MMUCR,r9;
936#else
937#define RESTORE_MMU_REGS
938#endif
939
940#ifdef CONFIG_40x
941	.globl	ret_from_crit_exc
942ret_from_crit_exc:
943	mfspr	r9,SPRN_SPRG3
944	lis	r10,saved_ksp_limit@ha;
945	lwz	r10,saved_ksp_limit@l(r10);
946	tovirt(r9,r9);
947	stw	r10,KSP_LIMIT(r9)
948	lis	r9,crit_srr0@ha;
949	lwz	r9,crit_srr0@l(r9);
950	lis	r10,crit_srr1@ha;
951	lwz	r10,crit_srr1@l(r10);
952	mtspr	SPRN_SRR0,r9;
953	mtspr	SPRN_SRR1,r10;
954	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
955#endif /* CONFIG_40x */
956
957#ifdef CONFIG_BOOKE
958	.globl	ret_from_crit_exc
959ret_from_crit_exc:
960	mfspr	r9,SPRN_SPRG3
961	lwz	r10,SAVED_KSP_LIMIT(r1)
962	stw	r10,KSP_LIMIT(r9)
963	RESTORE_xSRR(SRR0,SRR1);
964	RESTORE_MMU_REGS;
965	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
966
967	.globl	ret_from_debug_exc
968ret_from_debug_exc:
969	mfspr	r9,SPRN_SPRG3
970	lwz	r10,SAVED_KSP_LIMIT(r1)
971	stw	r10,KSP_LIMIT(r9)
972	lwz	r9,THREAD_INFO-THREAD(r9)
973	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
974	lwz	r10,TI_PREEMPT(r10)
975	stw	r10,TI_PREEMPT(r9)
976	RESTORE_xSRR(SRR0,SRR1);
977	RESTORE_xSRR(CSRR0,CSRR1);
978	RESTORE_MMU_REGS;
979	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
980
981	.globl	ret_from_mcheck_exc
982ret_from_mcheck_exc:
983	mfspr	r9,SPRN_SPRG3
984	lwz	r10,SAVED_KSP_LIMIT(r1)
985	stw	r10,KSP_LIMIT(r9)
986	RESTORE_xSRR(SRR0,SRR1);
987	RESTORE_xSRR(CSRR0,CSRR1);
988	RESTORE_xSRR(DSRR0,DSRR1);
989	RESTORE_MMU_REGS;
990	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
991#endif /* CONFIG_BOOKE */
992
993/*
994 * Load the DBCR0 value for a task that is being ptraced,
995 * having first saved away the global DBCR0.  Note that r0
996 * has the dbcr0 value to set on entry to this routine.
997 */
998load_dbcr0:
999	mfmsr	r10		/* first disable debug exceptions */
1000	rlwinm	r10,r10,0,~MSR_DE
1001	mtmsr	r10
1002	isync
1003	mfspr	r10,SPRN_DBCR0
1004	lis	r11,global_dbcr0@ha
1005	addi	r11,r11,global_dbcr0@l
1006#ifdef CONFIG_SMP
1007	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
1008	lwz	r9,TI_CPU(r9)
1009	slwi	r9,r9,3
1010	add	r11,r11,r9
1011#endif
1012	stw	r10,0(r11)
1013	mtspr	SPRN_DBCR0,r0
1014	lwz	r10,4(r11)
1015	addi	r10,r10,1
1016	stw	r10,4(r11)
1017	li	r11,-1
1018	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1019	blr
1020
1021	.section .bss
1022	.align	4
1023global_dbcr0:
1024	.space	8*NR_CPUS
1025	.previous
1026#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1027
1028do_work:			/* r10 contains MSR_KERNEL here */
1029	andi.	r0,r9,_TIF_NEED_RESCHED
1030	beq	do_user_signal
1031
1032do_resched:			/* r10 contains MSR_KERNEL here */
1033	ori	r10,r10,MSR_EE
1034	SYNC
1035	MTMSRD(r10)		/* hard-enable interrupts */
1036	bl	schedule
1037recheck:
1038	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1039	SYNC
1040	MTMSRD(r10)		/* disable interrupts */
1041	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
1042	lwz	r9,TI_FLAGS(r9)
1043	andi.	r0,r9,_TIF_NEED_RESCHED
1044	bne-	do_resched
1045	andi.	r0,r9,_TIF_USER_WORK_MASK
1046	beq	restore_user
1047do_user_signal:			/* r10 contains MSR_KERNEL here */
1048	ori	r10,r10,MSR_EE
1049	SYNC
1050	MTMSRD(r10)		/* hard-enable interrupts */
1051	/* save r13-r31 in the exception frame, if not already done */
1052	lwz	r3,_TRAP(r1)
1053	andi.	r0,r3,1
1054	beq	2f
1055	SAVE_NVGPRS(r1)
1056	rlwinm	r3,r3,0,0,30
1057	stw	r3,_TRAP(r1)
10582:	li	r3,0
1059	addi	r4,r1,STACK_FRAME_OVERHEAD
1060	bl	do_signal
1061	REST_NVGPRS(r1)
1062	b	recheck
1063
1064/*
1065 * We come here when we are at the end of handling an exception
1066 * that occurred at a place where taking an exception will lose
1067 * state information, such as the contents of SRR0 and SRR1.
1068 */
1069nonrecoverable:
1070	lis	r10,exc_exit_restart_end@ha
1071	addi	r10,r10,exc_exit_restart_end@l
1072	cmplw	r12,r10
1073	bge	3f
1074	lis	r11,exc_exit_restart@ha
1075	addi	r11,r11,exc_exit_restart@l
1076	cmplw	r12,r11
1077	blt	3f
1078	lis	r10,ee_restarts@ha
1079	lwz	r12,ee_restarts@l(r10)
1080	addi	r12,r12,1
1081	stw	r12,ee_restarts@l(r10)
1082	mr	r12,r11		/* restart at exc_exit_restart */
1083	blr
10843:	/* OK, we can't recover, kill this process */
1085	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1086BEGIN_FTR_SECTION
1087	blr
1088END_FTR_SECTION_IFSET(CPU_FTR_601)
1089	lwz	r3,_TRAP(r1)
1090	andi.	r0,r3,1
1091	beq	4f
1092	SAVE_NVGPRS(r1)
1093	rlwinm	r3,r3,0,0,30
1094	stw	r3,_TRAP(r1)
10954:	addi	r3,r1,STACK_FRAME_OVERHEAD
1096	bl	nonrecoverable_exception
1097	/* shouldn't return */
1098	b	4b
1099
1100	.section .bss
1101	.align	2
1102ee_restarts:
1103	.space	4
1104	.previous
1105
1106/*
1107 * PROM code for specific machines follows.  Put it
1108 * here so it's easy to add arch-specific sections later.
1109 * -- Cort
1110 */
1111#ifdef CONFIG_PPC_RTAS
1112/*
1113 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1114 * called with the MMU off.
1115 */
1116_GLOBAL(enter_rtas)
1117	stwu	r1,-INT_FRAME_SIZE(r1)
1118	mflr	r0
1119	stw	r0,INT_FRAME_SIZE+4(r1)
1120	LOAD_REG_ADDR(r4, rtas)
1121	lis	r6,1f@ha	/* physical return address for rtas */
1122	addi	r6,r6,1f@l
1123	tophys(r6,r6)
1124	tophys(r7,r1)
1125	lwz	r8,RTASENTRY(r4)
1126	lwz	r4,RTASBASE(r4)
1127	mfmsr	r9
1128	stw	r9,8(r1)
1129	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1130	SYNC			/* disable interrupts so SRR0/1 */
1131	MTMSRD(r0)		/* don't get trashed */
1132	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1133	mtlr	r6
1134	mtspr	SPRN_SPRG2,r7
1135	mtspr	SPRN_SRR0,r8
1136	mtspr	SPRN_SRR1,r9
1137	RFI
11381:	tophys(r9,r1)
1139	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1140	lwz	r9,8(r9)	/* original msr value */
1141	FIX_SRR1(r9,r0)
1142	addi	r1,r1,INT_FRAME_SIZE
1143	li	r0,0
1144	mtspr	SPRN_SPRG2,r0
1145	mtspr	SPRN_SRR0,r8
1146	mtspr	SPRN_SRR1,r9
1147	RFI			/* return to caller */
1148
1149	.globl	machine_check_in_rtas
1150machine_check_in_rtas:
1151	twi	31,0,0
1152	/* XXX load up BATs and panic */
1153
1154#endif /* CONFIG_PPC_RTAS */
1155
1156#ifdef CONFIG_FTRACE
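/*
 * The mcount/ftrace stubs below all use the same 48-byte stack frame:
 * CR is kept at 8(r1), r3-r10 at 12..40(r1) and the entry LR (the
 * address inside the traced function) at 44(r1); 52(r1) is the LR save
 * word of the caller's frame, i.e. the traced function's return address.
 */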
1157#ifdef CONFIG_DYNAMIC_FTRACE
1158_GLOBAL(mcount)
1159_GLOBAL(_mcount)
1160	stwu	r1,-48(r1)
1161	stw	r3, 12(r1)
1162	stw	r4, 16(r1)
1163	stw	r5, 20(r1)
1164	stw	r6, 24(r1)
1165	mflr	r3
1166	stw	r7, 28(r1)
1167	mfcr	r5
1168	stw	r8, 32(r1)
1169	stw	r9, 36(r1)
1170	stw	r10,40(r1)
1171	stw	r3, 44(r1)
1172	stw	r5, 8(r1)
1173	subi	r3, r3, MCOUNT_INSN_SIZE
1174	.globl mcount_call
1175mcount_call:
1176	bl	ftrace_stub
1177	nop
1178	lwz	r6, 8(r1)
1179	lwz	r0, 44(r1)
1180	lwz	r3, 12(r1)
1181	mtctr	r0
1182	lwz	r4, 16(r1)
1183	mtcr	r6
1184	lwz	r5, 20(r1)
1185	lwz	r6, 24(r1)
1186	lwz	r0, 52(r1)
1187	lwz	r7, 28(r1)
1188	lwz	r8, 32(r1)
1189	mtlr	r0
1190	lwz	r9, 36(r1)
1191	lwz	r10,40(r1)
1192	addi	r1, r1, 48
1193	bctr
1194
1195_GLOBAL(ftrace_caller)
1196	/* Based on objdump output from glibc */
1197	stwu	r1,-48(r1)
1198	stw	r3, 12(r1)
1199	stw	r4, 16(r1)
1200	stw	r5, 20(r1)
1201	stw	r6, 24(r1)
1202	mflr	r3
1203	lwz	r4, 52(r1)
1204	mfcr	r5
1205	stw	r7, 28(r1)
1206	stw	r8, 32(r1)
1207	stw	r9, 36(r1)
1208	stw	r10,40(r1)
1209	stw	r3, 44(r1)
1210	stw	r5, 8(r1)
1211	subi	r3, r3, MCOUNT_INSN_SIZE
1212.globl ftrace_call
1213ftrace_call:
1214	bl	ftrace_stub
1215	nop
1216	lwz	r6, 8(r1)
1217	lwz	r0, 44(r1)
1218	lwz	r3, 12(r1)
1219	mtctr	r0
1220	lwz	r4, 16(r1)
1221	mtcr	r6
1222	lwz	r5, 20(r1)
1223	lwz	r6, 24(r1)
1224	lwz	r0, 52(r1)
1225	lwz	r7, 28(r1)
1226	lwz	r8, 32(r1)
1227	mtlr	r0
1228	lwz	r9, 36(r1)
1229	lwz	r10,40(r1)
1230	addi	r1, r1, 48
1231	bctr
1232#else
1233_GLOBAL(mcount)
1234_GLOBAL(_mcount)
1235	stwu	r1,-48(r1)
1236	stw	r3, 12(r1)
1237	stw	r4, 16(r1)
1238	stw	r5, 20(r1)
1239	stw	r6, 24(r1)
1240	mflr	r3
1241	lwz	r4, 52(r1)
1242	mfcr	r5
1243	stw	r7, 28(r1)
1244	stw	r8, 32(r1)
1245	stw	r9, 36(r1)
1246	stw	r10,40(r1)
1247	stw	r3, 44(r1)
1248	stw	r5, 8(r1)
1249
1250	subi	r3, r3, MCOUNT_INSN_SIZE
1251	LOAD_REG_ADDR(r5, ftrace_trace_function)
1252	lwz	r5,0(r5)
1253
1254	mtctr	r5
1255	bctrl
1256
1257	nop
1258
1259	lwz	r6, 8(r1)
1260	lwz	r0, 44(r1)
1261	lwz	r3, 12(r1)
1262	mtctr	r0
1263	lwz	r4, 16(r1)
1264	mtcr	r6
1265	lwz	r5, 20(r1)
1266	lwz	r6, 24(r1)
1267	lwz	r0, 52(r1)
1268	lwz	r7, 28(r1)
1269	lwz	r8, 32(r1)
1270	mtlr	r0
1271	lwz	r9, 36(r1)
1272	lwz	r10,40(r1)
1273	addi	r1, r1, 48
1274	bctr
1275#endif
1276
1277_GLOBAL(ftrace_stub)
1278	blr
1279
1280#endif /* CONFIG_FTRACE */
1281