/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif

#ifdef CONFIG_BOOKE
#include "head_booke.h"
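/*
 * Finish the exception-level prolog: save r8 in the exception level's
 * scratch SPRG, point r8 at the exception-level stack, copy the r10/r11
 * values that the low-level prolog saved on that stack into the normal
 * exception frame at r11, then restore r8.
 */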
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
	lwz	r9,THREAD_INFO-THREAD(r12)
	cmplw	r1,r9			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

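/*
 * System call entry: r0 holds the system call number and r3-r8 the
 * arguments; the exception prolog has already built a pt_regs frame on
 * the kernel stack (r1) and set r2 to the current task.
 */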
_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
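	/* A return value in the range [-_LAST_ERRNO, -1] indicates an error:
	 * negate it and set the CR0.SO bit, which the user-mode syscall
	 * stub tests to decide whether to set errno. */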
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

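/* System call number was out of range: fail with -ENOSYS */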
66:	li	r3,-ENOSYS
	b	ret_from_syscall

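/*
 * A newly forked child starts here: finish the scheduler bookkeeping
 * for the previous task and return 0 to the child through the normal
 * system call exit path.
 */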
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

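/*
 * Slow path for system call exit: handle _TIF_RESTOREALL, error
 * reporting, clearing of per-syscall TIF flags, and syscall exit tracing.
 */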
syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

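/*
 * Fast return path for exceptions handled entirely in low-level assembly
 * without a full switch frame: r11 still points to the exception frame
 * and r9/r12 hold the saved MSR/NIP to return to.
 */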
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

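/*
 * Restore the full register state from the exception frame and return from
 * an exception level (critical, debug or machine check) using that level's
 * own save/restore SPR pair and rfi variant.
 */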
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

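/* Saved global DBCR0 value, plus a count of tasks using a private DBCR0 */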
	.comm	global_dbcr0,8
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

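/*
 * Work to do before returning to user mode: reschedule if needed, then
 * deliver any pending signals, re-checking the flags after each step.
 */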
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
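/*
 * Note: r3 is assumed to carry the (physical) address of the RTAS argument
 * block set up by the caller; it is passed through to RTAS unmodified.
 */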
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */