xref: /linux/arch/powerpc/kernel/entry_32.S (revision 6d247e4d264961aa3b871290f9b11a48d5a567f2)
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 *  PowerPC version
4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
7 *  Adapted for Power Macintosh by Paul Mackerras.
8 *  Low-level exception handlers and MMU support
9 *  rewritten by Paul Mackerras.
10 *    Copyright (C) 1996 Paul Mackerras.
11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 *
13 *  This file contains the system call entry code, context switch
14 *  code, and exception/interrupt return code for PowerPC.
15 */
16
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <linux/sys.h>
20#include <linux/threads.h>
21#include <asm/reg.h>
22#include <asm/page.h>
23#include <asm/mmu.h>
24#include <asm/cputable.h>
25#include <asm/thread_info.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/unistd.h>
29#include <asm/ptrace.h>
30#include <asm/export.h>
31#include <asm/feature-fixups.h>
32#include <asm/barrier.h>
33#include <asm/kup.h>
34#include <asm/bug.h>
35
36#include "head_32.h"
37
38/*
39 * powerpc relies on return from interrupt/syscall being context synchronising
40 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
41 * synchronisation instructions.
42 */
43
44/*
45 * Align to 4k in order to ensure that all functions modifying srr0/srr1
46 * fit into one page in order to not encounter a TLB miss between the
47 * modification of srr0/srr1 and the associated rfi.
48 */
49	.align	12
50
51#ifdef CONFIG_BOOKE
/*
 * Book-E only: entry points for machine-check, debug and critical
 * exception levels.  Each level saves its own pair of save/restore
 * SPRs (DSRR0/1, CSRR0/1, then SRR0/1) into the exception frame at
 * r11 and falls through to the next, ending in crit_transfer_to_handler.
 */
52	.globl	mcheck_transfer_to_handler
53mcheck_transfer_to_handler:
54	mfspr	r0,SPRN_DSRR0
55	stw	r0,_DSRR0(r11)
56	mfspr	r0,SPRN_DSRR1
57	stw	r0,_DSRR1(r11)
58	/* fall through */
59_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)
60
61	.globl	debug_transfer_to_handler
62debug_transfer_to_handler:
63	mfspr	r0,SPRN_CSRR0
64	stw	r0,_CSRR0(r11)
65	mfspr	r0,SPRN_CSRR1
66	stw	r0,_CSRR1(r11)
67	/* fall through */
68_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)
69
70	.globl	crit_transfer_to_handler
71crit_transfer_to_handler:
72#ifdef CONFIG_PPC_BOOK3E_MMU
	/* Save the MAS TLB-setup registers so the critical handler may
	 * safely take/handle TLB misses of its own. */
73	mfspr	r0,SPRN_MAS0
74	stw	r0,MAS0(r11)
75	mfspr	r0,SPRN_MAS1
76	stw	r0,MAS1(r11)
77	mfspr	r0,SPRN_MAS2
78	stw	r0,MAS2(r11)
79	mfspr	r0,SPRN_MAS3
80	stw	r0,MAS3(r11)
81	mfspr	r0,SPRN_MAS6
82	stw	r0,MAS6(r11)
83#ifdef CONFIG_PHYS_64BIT
84	mfspr	r0,SPRN_MAS7
85	stw	r0,MAS7(r11)
86#endif /* CONFIG_PHYS_64BIT */
87#endif /* CONFIG_PPC_BOOK3E_MMU */
88#ifdef CONFIG_44x
89	mfspr	r0,SPRN_MMUCR
90	stw	r0,MMUCR(r11)
91#endif
	/* A critical interrupt can itself have interrupted a first-level
	 * exception before SRR0/SRR1 were consumed; preserve them too. */
92	mfspr	r0,SPRN_SRR0
93	stw	r0,_SRR0(r11)
94	mfspr	r0,SPRN_SRR1
95	stw	r0,_SRR1(r11)
96
97	/* set the stack limit to the current stack */
98	mfspr	r8,SPRN_SPRG_THREAD
99	lwz	r0,KSP_LIMIT(r8)
100	stw	r0,SAVED_KSP_LIMIT(r11)
101	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
102	stw	r0,KSP_LIMIT(r8)
103	/* fall through */
104_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
105#endif
106
107#ifdef CONFIG_40x
/*
 * 40x only: critical exception entry.  r10/r11 were stashed at the
 * fixed low-memory slots crit_r10/crit_r11 (presumably by the exception
 * prolog in head_40x — confirm there); copy them into the frame, and
 * park SRR0/SRR1 in crit_srr0/crit_srr1 for the eventual return.
 */
108	.globl	crit_transfer_to_handler
109crit_transfer_to_handler:
110	lwz	r0,crit_r10@l(0)
111	stw	r0,GPR10(r11)
112	lwz	r0,crit_r11@l(0)
113	stw	r0,GPR11(r11)
114	mfspr	r0,SPRN_SRR0
115	stw	r0,crit_srr0@l(0)
116	mfspr	r0,SPRN_SRR1
117	stw	r0,crit_srr1@l(0)
118
119	/* set the stack limit to the current stack */
120	mfspr	r8,SPRN_SPRG_THREAD
121	lwz	r0,KSP_LIMIT(r8)
122	stw	r0,saved_ksp_limit@l(0)
123	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
124	stw	r0,KSP_LIMIT(r8)
125	/* fall through */
126_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
127#endif
128
129/*
130 * This code finishes saving the registers to the exception frame
131 * and jumps to the appropriate handler for the exception, turning
132 * on address translation.
133 * Note that we rely on the caller having set cr0.eq iff the exception
134 * occurred in kernel mode (i.e. MSR:PR = 0).
135 */
136	.globl	transfer_to_handler_full
137transfer_to_handler_full:
138	SAVE_NVGPRS(r11)
139_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
140	/* fall through */
141
/*
 * Register contract on entry, as used below: r11 = exception frame,
 * r9 = interrupted MSR image, r12 = interrupted NIP, r10 = MSR value
 * that ends up in SRR1 for the handler, r1 = kernel stack pointer.
 */
142	.globl	transfer_to_handler
143transfer_to_handler:
144	stw	r2,GPR2(r11)
145	stw	r12,_NIP(r11)
146	stw	r9,_MSR(r11)
147	andi.	r2,r9,MSR_PR
148	mfctr	r12
149	mfspr	r2,SPRN_XER
150	stw	r12,_CTR(r11)
151	stw	r2,_XER(r11)
152	mfspr	r12,SPRN_SPRG_THREAD
153	tovirt_vmstack r12, r12
154	beq	2f			/* if from user, fix up THREAD.regs */
155	addi	r2, r12, -THREAD
156	addi	r11,r1,STACK_FRAME_OVERHEAD
157	stw	r11,PT_REGS(r12)
158#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
159	/* Check to see if the dbcr0 register is set up to debug.  Use the
160	   internal debug mode bit to do this. */
161	lwz	r12,THREAD_DBCR0(r12)
162	andis.	r12,r12,DBCR0_IDM@h
163#endif
164	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
165#ifdef CONFIG_PPC_BOOK3S_32
166	kuep_lock r11, r12
167#endif
168#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* cr0.eq still reflects the DBCR0_IDM test above */
169	beq+	3f
170	/* From user and task is ptraced - load up global dbcr0 */
171	li	r12,-1			/* clear all pending debug events */
172	mtspr	SPRN_DBSR,r12
173	lis	r11,global_dbcr0@ha
174	tophys(r11,r11)
175	addi	r11,r11,global_dbcr0@l
176#ifdef CONFIG_SMP
177	lwz	r9,TASK_CPU(r2)
178	slwi	r9,r9,3
179	add	r11,r11,r9
180#endif
181	lwz	r12,0(r11)
182	mtspr	SPRN_DBCR0,r12
183	lwz	r12,4(r11)
184	addi	r12,r12,-1
185	stw	r12,4(r11)
186#endif
187
188	b	3f
189
1902:	/* if from kernel, check interrupted DOZE/NAP mode and
191         * check for stack overflow
192         */
193	kuap_save_and_lock r11, r12, r9, r2, r6
194	addi	r2, r12, -THREAD
195#ifndef CONFIG_VMAP_STACK
196	lwz	r9,KSP_LIMIT(r12)
197	cmplw	r1,r9			/* if r1 <= ksp_limit */
198	ble-	stack_ovf		/* then the kernel stack overflowed */
199#endif
2005:
201#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	/* Copy the thread-local flags into cr7 and test the idle bits */
202	lwz	r12,TI_LOCAL_FLAGS(r2)
203	mtcrf	0x01,r12
204	bt-	31-TLF_NAPPING,4f
205	bt-	31-TLF_SLEEPING,7f
206#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
207	.globl transfer_to_handler_cont
208transfer_to_handler_cont:
2093:
210	mflr	r9
211	tovirt_novmstack r2, r2 	/* set r2 to current */
212	tovirt_vmstack r9, r9
	/* The two words following the caller's LR hold the handler and
	 * the post-handler return address (see e.g. the transfer at the
	 * end of fast_exception_return's nonrecoverable path). */
213	lwz	r11,0(r9)		/* virtual address of handler */
214	lwz	r9,4(r9)		/* where to go when done */
215#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
216	mtspr	SPRN_NRI, r0
217#endif
218#ifdef CONFIG_TRACE_IRQFLAGS
219	/*
220	 * When tracing IRQ state (lockdep) we enable the MMU before we call
221	 * the IRQ tracing functions as they might access vmalloc space or
222	 * perform IOs for console output.
223	 *
224	 * To speed up the syscall path where interrupts stay on, let's check
225	 * first if we are changing the MSR value at all.
226	 */
227	tophys_novmstack r12, r1
228	lwz	r12,_MSR(r12)
229	andi.	r12,r12,MSR_EE
230	bne	1f
231
232	/* MSR isn't changing, just transition directly */
233#endif
234	mtspr	SPRN_SRR0,r11
235	mtspr	SPRN_SRR1,r10
236	mtlr	r9
237	rfi				/* jump to handler, enable MMU */
238#ifdef CONFIG_40x
239	b .	/* Prevent prefetch past rfi */
240#endif
241
242#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	/* Interrupted a napping/sleeping idle thread: clear the flag and
	 * resume via the power-save restore path instead of the handler. */
2434:	rlwinm	r12,r12,0,~_TLF_NAPPING
244	stw	r12,TI_LOCAL_FLAGS(r2)
245	b	power_save_ppc32_restore

2477:	rlwinm	r12,r12,0,~_TLF_SLEEPING
248	stw	r12,TI_LOCAL_FLAGS(r2)
249	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
250	rlwinm	r9,r9,0,~MSR_EE
251	lwz	r12,_LINK(r11)		/* and return to address in LR */
252	kuap_restore r11, r2, r3, r4, r5
253	lwz	r2, GPR2(r11)
254	b	fast_exception_return
255#endif
256_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
257_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
258
259#ifdef CONFIG_TRACE_IRQFLAGS
2601:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
261	 * keep interrupts disabled at this point otherwise we might risk
262	 * taking an interrupt before we tell lockdep they are enabled.
263	 */
264	lis	r12,reenable_mmu@h
265	ori	r12,r12,reenable_mmu@l
266	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
267	mtspr	SPRN_SRR0,r12
268	mtspr	SPRN_SRR1,r0
269	rfi
270#ifdef CONFIG_40x
271	b .	/* Prevent prefetch past rfi */
272#endif
273
/* Landed here via the rfi above, now running translated with MSR_KERNEL */
274reenable_mmu:
275	/*
276	 * We save a bunch of GPRs,
277	 * r3 can be different from GPR3(r1) at this point, r9 and r11
278	 * contains the old MSR and handler address respectively,
279	 * r4 & r5 can contain page fault arguments that need to be passed
280	 * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
281	 * clobbered as they aren't useful past this point.
282	 */
283
284	stwu	r1,-32(r1)
285	stw	r9,8(r1)
286	stw	r11,12(r1)
287	stw	r3,16(r1)
288	stw	r4,20(r1)
289	stw	r5,24(r1)
290
291	/* If we are disabling interrupts (normal case), simply log it with
292	 * lockdep
293	 */
2941:	bl	trace_hardirqs_off
295	lwz	r5,24(r1)
296	lwz	r4,20(r1)
297	lwz	r3,16(r1)
298	lwz	r11,12(r1)
299	lwz	r9,8(r1)
300	addi	r1,r1,32
	/* r11 = handler, r9 = post-handler return address (see above) */
301	mtctr	r11
302	mtlr	r9
303	bctr				/* jump to handler */
304#endif /* CONFIG_TRACE_IRQFLAGS */
305
306#ifndef CONFIG_VMAP_STACK
307/*
308 * On kernel stack overflow, load up an initial stack pointer
309 * and call StackOverflow(regs), which should not return.
310 */
311stack_ovf:
312	/* sometimes we use a statically-allocated stack, which is OK. */
313	lis	r12,_end@h
314	ori	r12,r12,_end@l
315	cmplw	r1,r12
316	ble	5b			/* r1 <= &_end is OK */
	/* Genuine overflow: finish the frame, switch r1 to the top of the
	 * init thread stack, and rfi into StackOverflow(regs). */
317	SAVE_NVGPRS(r11)
318	addi	r3,r1,STACK_FRAME_OVERHEAD
319	lis	r1,init_thread_union@ha
320	addi	r1,r1,init_thread_union@l
321	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
322	lis	r9,StackOverflow@ha
323	addi	r9,r9,StackOverflow@l
324	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
325#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
326	mtspr	SPRN_NRI, r0
327#endif
328	mtspr	SPRN_SRR0,r9
329	mtspr	SPRN_SRR1,r10
330	rfi
331#ifdef CONFIG_40x
332	b .	/* Prevent prefetch past rfi */
333#endif
334_ASM_NOKPROBE_SYMBOL(stack_ovf)
335#endif
336
337#ifdef CONFIG_TRACE_IRQFLAGS
338trace_syscall_entry_irq_off:
339	/*
340	 * Syscall shouldn't happen while interrupts are disabled,
341	 * so let's do a warning here.
342	 */
3430:	trap
344	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
345	bl	trace_hardirqs_on
346
347	/* Now enable for real */
348	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
349	mtmsr	r10
350
	/* trace_hardirqs_on clobbered the volatile regs; reload the syscall
	 * number (r0) and arguments (r3-r8) from the frame before retrying */
351	REST_GPR(0, r1)
352	REST_4GPRS(3, r1)
353	REST_2GPRS(7, r1)
354	b	DoSyscall
355#endif /* CONFIG_TRACE_IRQFLAGS */
356
/* Syscall entry: with lockdep, divert to the warning path above if the
 * interrupted MSR (r9) had EE clear; otherwise fall into DoSyscall. */
357	.globl	transfer_to_syscall
358transfer_to_syscall:
359#ifdef CONFIG_TRACE_IRQFLAGS
360	andi.	r12,r9,MSR_EE
361	beq-	trace_syscall_entry_irq_off
362#endif /* CONFIG_TRACE_IRQFLAGS */
363
364/*
365 * Handle a system call.
366 */
367	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
368	.stabs	"entry_32.S",N_SO,0,0,0f
3690:
370
/*
 * System call dispatcher.  On entry: r0 = syscall number, r3-r8 =
 * arguments, r1 = exception frame, r2 = current.  Validates r0 against
 * NR_syscalls, indexes sys_call_table and calls the handler, then falls
 * into ret_from_syscall to post-process the return value.
 */
371_GLOBAL(DoSyscall)
372	stw	r3,ORIG_GPR3(r1)
373	li	r12,0
374	stw	r12,RESULT(r1)
375#ifdef CONFIG_TRACE_IRQFLAGS
376	/* Make sure interrupts are enabled */
377	mfmsr	r11
378	andi.	r12,r11,MSR_EE
379	/* We came in with interrupts disabled, we WARN and mark them enabled
380	 * for lockdep now */
3810:	tweqi	r12, 0
382	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
383#endif /* CONFIG_TRACE_IRQFLAGS */
384	lwz	r11,TI_FLAGS(r2)
385	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
386	bne-	syscall_dotrace
387syscall_dotrace_cont:
388	cmplwi	0,r0,NR_syscalls
389	lis	r10,sys_call_table@h
390	ori	r10,r10,sys_call_table@l
391	slwi	r0,r0,2
392	bge-	66f

394	barrier_nospec_asm
395	/*
396	 * Prevent the load of the handler below (based on the user-passed
397	 * system call number) being speculatively executed until the test
398	 * against NR_syscalls and branch to 66f above has
399	 * committed.
400	 */

402	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
403	mtlr	r10
404	addi	r9,r1,STACK_FRAME_OVERHEAD
405	PPC440EP_ERR42
406	blrl			/* Call handler */
407	.globl	ret_from_syscall
408ret_from_syscall:
409#ifdef CONFIG_DEBUG_RSEQ
410	/* Check whether the syscall is issued inside a restartable sequence */
411	stw	r3,GPR3(r1)
412	addi    r3,r1,STACK_FRAME_OVERHEAD
413	bl      rseq_syscall
414	lwz	r3,GPR3(r1)
415#endif
	/* keep the raw return value in r6; syscall_exit_work stores it as
	 * RESULT(r1) later */
416	mr	r6,r3
417	/* disable interrupts so current_thread_info()->flags can't change */
418	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
419	/* Note: We don't bother telling lockdep about it */
420	mtmsr	r10
421	lwz	r9,TI_FLAGS(r2)
422	li	r8,-MAX_ERRNO
423	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
424	bne-	syscall_exit_work
	/* Errors are returned as positive errno with the CR SO bit set */
425	cmplw	0,r3,r8
426	blt+	syscall_exit_cont
427	lwz	r11,_CCR(r1)			/* Load CR */
428	neg	r3,r3
429	oris	r11,r11,0x1000	/* Set SO bit in CR */
430	stw	r11,_CCR(r1)
431syscall_exit_cont:
432	lwz	r8,_MSR(r1)
433#ifdef CONFIG_TRACE_IRQFLAGS
434	/* If we are going to return from the syscall with interrupts
435	 * off, we trace that here. It shouldn't normally happen.
436	 */
437	andi.	r10,r8,MSR_EE
438	bne+	1f
439	stw	r3,GPR3(r1)
440	bl      trace_hardirqs_off
441	lwz	r3,GPR3(r1)
4421:
443#endif /* CONFIG_TRACE_IRQFLAGS */
444#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
445	/* If the process has its own DBCR0 value, load it up.  The internal
446	   debug mode bit tells us that dbcr0 should be loaded. */
447	lwz	r0,THREAD+THREAD_DBCR0(r2)
448	andis.	r10,r0,DBCR0_IDM@h
449	bnel-	load_dbcr0
450#endif
451#ifdef CONFIG_44x
452BEGIN_MMU_FTR_SECTION
453	lis	r4,icache_44x_need_flush@ha
454	lwz	r5,icache_44x_need_flush@l(r4)
455	cmplwi	cr0,r5,0
456	bne-	2f
4571:
458END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
459#endif /* CONFIG_44x */
460BEGIN_FTR_SECTION
461	lwarx	r7,0,r1
462END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
463	stwcx.	r0,0,r1			/* to clear the reservation */
464	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
465#ifdef CONFIG_PPC_BOOK3S_32
466	kuep_unlock r5, r7
467#endif
468	kuap_check r2, r4
469	lwz	r4,_LINK(r1)
470	lwz	r5,_CCR(r1)
471	mtlr	r4
472	mtcr	r5
473	lwz	r7,_NIP(r1)
474	lwz	r2,GPR2(r1)
475	lwz	r1,GPR1(r1)
476syscall_exit_finish:
477#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
478	mtspr	SPRN_NRI, r0
479#endif
480	mtspr	SPRN_SRR0,r7
481	mtspr	SPRN_SRR1,r8
482	rfi
483#ifdef CONFIG_40x
484	b .	/* Prevent prefetch past rfi */
485#endif
486_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
487#ifdef CONFIG_44x
	/* 44x: flush the icache and clear the pending-flush flag, then
	 * rejoin the exit path at 1b above */
4882:	li	r7,0
489	iccci	r0,r0
490	stw	r7,icache_44x_need_flush@l(r4)
491	b	1b
492#endif  /* CONFIG_44x */

	/* syscall number out of range */
49466:	li	r3,-ENOSYS
495	b	ret_from_syscall
496
/* First return of a newly-forked child: finish the scheduler hand-off,
 * then exit as a syscall returning 0 to the child. */
497	.globl	ret_from_fork
498ret_from_fork:
499	REST_NVGPRS(r1)
500	bl	schedule_tail
501	li	r3,0
502	b	ret_from_syscall

/* First run of a kernel thread: r14 = thread function, r15 = argument
 * (set up by copy_thread — confirm there); call it, then exit path. */
504	.globl	ret_from_kernel_thread
505ret_from_kernel_thread:
506	REST_NVGPRS(r1)
507	bl	schedule_tail
508	mtlr	r14
509	mr	r3,r15
510	PPC440EP_ERR42
511	blrl
512	li	r3,0
513	b	ret_from_syscall
514
515/* Traced system call support */
516syscall_dotrace:
517	SAVE_NVGPRS(r1)
518	li	r0,0xc00
519	stw	r0,_TRAP(r1)
520	addi	r3,r1,STACK_FRAME_OVERHEAD
521	bl	do_syscall_trace_enter
522	/*
523	 * Restore argument registers possibly just changed.
524	 * We use the return value of do_syscall_trace_enter
525	 * for call number to look up in the table (r0).
526	 */
527	mr	r0,r3
528	lwz	r3,GPR3(r1)
529	lwz	r4,GPR4(r1)
530	lwz	r5,GPR5(r1)
531	lwz	r6,GPR6(r1)
532	lwz	r7,GPR7(r1)
533	lwz	r8,GPR8(r1)
534	REST_NVGPRS(r1)

536	cmplwi	r0,NR_syscalls
537	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
538	bge-	ret_from_syscall
539	b	syscall_dotrace_cont

/* Slow syscall-exit path, entered from ret_from_syscall when any work
 * flag is set.  On entry: r3 = return value, r6 = raw return value,
 * r8 = -MAX_ERRNO, r9 = TI_FLAGS, r10 = MSR_KERNEL (EE off). */
541syscall_exit_work:
542	andi.	r0,r9,_TIF_RESTOREALL
543	beq+	0f
544	REST_NVGPRS(r1)
545	b	2f
5460:	cmplw	0,r3,r8
547	blt+	1f
548	andi.	r0,r9,_TIF_NOERROR
549	bne-	1f
550	lwz	r11,_CCR(r1)			/* Load CR */
551	neg	r3,r3
552	oris	r11,r11,0x1000	/* Set SO bit in CR */
553	stw	r11,_CCR(r1)

5551:	stw	r6,RESULT(r1)	/* Save result */
556	stw	r3,GPR3(r1)	/* Update return value */
5572:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
558	beq	4f

560	/* Clear per-syscall TIF flags if any are set.  */

562	li	r11,_TIF_PERSYSCALL_MASK
563	addi	r12,r2,TI_FLAGS
	/* atomic read-modify-write: retry if the reservation is lost */
5643:	lwarx	r8,0,r12
565	andc	r8,r8,r11
566	stwcx.	r8,0,r12
567	bne-	3b

5694:	/* Anything which requires enabling interrupts? */
570	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
571	beq	ret_from_except

573	/* Re-enable interrupts. There is no need to trace that with
574	 * lockdep as we are supposed to have IRQs on at this point
575	 */
576	ori	r10,r10,MSR_EE
577	mtmsr	r10

579	/* Save NVGPRS if they're not saved already */
580	lwz	r4,_TRAP(r1)
581	andi.	r4,r4,1
582	beq	5f
583	SAVE_NVGPRS(r1)
584	li	r4,0xc00
585	stw	r4,_TRAP(r1)
5865:
587	addi	r3,r1,STACK_FRAME_OVERHEAD
588	bl	do_syscall_trace_leave
589	b	ret_from_except_full
590
591	/*
592	 * System call was called from kernel. We get here with SRR1 in r9.
593	 * Mark the exception as recoverable once we have retrieved SRR0,
594	 * trap a warning and return ENOSYS with CR[SO] set.
595	 */
596	.globl	ret_from_kernel_syscall
597ret_from_kernel_syscall:
598	mfspr	r9, SPRN_SRR0
599	mfspr	r10, SPRN_SRR1
600#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
601	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
602	mtmsr	r11
603#endif
604
6050:	trap
606	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
607
608	li	r3, ENOSYS
609	crset	so
610#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
611	mtspr	SPRN_NRI, r0
612#endif
613	mtspr	SPRN_SRR0, r9
614	mtspr	SPRN_SRR1, r10
615	rfi
616#ifdef CONFIG_40x
617	b .	/* Prevent prefetch past rfi */
618#endif
619_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)
620
621/*
622 * The fork/clone functions need to copy the full register set into
623 * the child process. Therefore we need to save all the nonvolatile
624 * registers (r13 - r31) before calling the C code.
625 * Clearing the low bit of _TRAP records that the frame now holds a
626 * full register set (see the corresponding test in syscall_exit_work).
627 */
626	.globl	ppc_fork
627ppc_fork:
628	SAVE_NVGPRS(r1)
629	lwz	r0,_TRAP(r1)
630	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
631	stw	r0,_TRAP(r1)		/* register set saved */
632	b	sys_fork

634	.globl	ppc_vfork
635ppc_vfork:
636	SAVE_NVGPRS(r1)
637	lwz	r0,_TRAP(r1)
638	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
639	stw	r0,_TRAP(r1)		/* register set saved */
640	b	sys_vfork

642	.globl	ppc_clone
643ppc_clone:
644	SAVE_NVGPRS(r1)
645	lwz	r0,_TRAP(r1)
646	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
647	stw	r0,_TRAP(r1)		/* register set saved */
648	b	sys_clone

650	.globl	ppc_clone3
651ppc_clone3:
652	SAVE_NVGPRS(r1)
653	lwz	r0,_TRAP(r1)
654	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
655	stw	r0,_TRAP(r1)		/* register set saved */
656	b	sys_clone3

658	.globl	ppc_swapcontext
659ppc_swapcontext:
660	SAVE_NVGPRS(r1)
661	lwz	r0,_TRAP(r1)
662	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
663	stw	r0,_TRAP(r1)		/* register set saved */
664	b	sys_swapcontext
665
666/*
667 * Top-level page fault handling.
668 * This is in assembler because if do_page_fault tells us that
669 * it is a bad kernel page fault, we want to save the non-volatile
670 * registers before calling bad_page_fault.
671 * On entry r5 holds DSISR (used for the DABR-match test below —
672 * set up by the exception prolog; confirm in head_book3s_32).
673 */
672	.globl	handle_page_fault
673handle_page_fault:
674	addi	r3,r1,STACK_FRAME_OVERHEAD
675#ifdef CONFIG_PPC_BOOK3S_32
676	andis.  r0,r5,DSISR_DABRMATCH@h
677	bne-    handle_dabr_fault
678#endif
679	bl	do_page_fault
680	cmpwi	r3,0
681	beq+	ret_from_except
	/* non-zero return: bad kernel fault.  Save NVGPRS, mark the frame
	 * full (clear _TRAP LSB) and report via bad_page_fault. */
682	SAVE_NVGPRS(r1)
683	lwz	r0,_TRAP(r1)
684	clrrwi	r0,r0,1
685	stw	r0,_TRAP(r1)
686	mr	r5,r3
687	addi	r3,r1,STACK_FRAME_OVERHEAD
688	lwz	r4,_DAR(r1)
689	bl	bad_page_fault
690	b	ret_from_except_full

692#ifdef CONFIG_PPC_BOOK3S_32
693	/* We have a data breakpoint exception - handle it */
694handle_dabr_fault:
695	SAVE_NVGPRS(r1)
696	lwz	r0,_TRAP(r1)
697	clrrwi	r0,r0,1
698	stw	r0,_TRAP(r1)
699	bl      do_break
700	b	ret_from_except_full
701#endif
702
703/*
704 * This routine switches between two different tasks.  The process
705 * state of one is saved on its kernel stack.  Then the state
706 * of the other is restored from its kernel stack.  The memory
707 * management hardware is updated to the second process's state.
708 * Finally, we can return to the second process.
709 * On entry, r3 points to the THREAD for the current task, r4
710 * points to the THREAD for the new task.
711 *
712 * This routine is always called with interrupts disabled.
713 *
714 * Note: there are two ways to get to the "going out" portion
715 * of this code; either by coming in via the entry (_switch)
716 * or via "fork" which must set up an environment equivalent
717 * to the "_switch" path.  If you change this, you'll have to
718 * change the fork code also.
719 *
720 * The code which creates the new task context is in 'copy_thread'
721 * in arch/ppc/kernel/process.c
722 */
723_GLOBAL(_switch)
724	stwu	r1,-INT_FRAME_SIZE(r1)
725	mflr	r0
726	stw	r0,INT_FRAME_SIZE+4(r1)
727	/* r3-r12 are caller saved -- Cort */
728	SAVE_NVGPRS(r1)
729	stw	r0,_NIP(r1)	/* Return to switch caller */
730	mfmsr	r11
731	li	r0,MSR_FP	/* Disable floating-point */
732#ifdef CONFIG_ALTIVEC
733BEGIN_FTR_SECTION
734	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
735	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
736	stw	r12,THREAD+THREAD_VRSAVE(r2)
737END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
738#endif /* CONFIG_ALTIVEC */
739#ifdef CONFIG_SPE
740BEGIN_FTR_SECTION
741	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
742	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
743	stw	r12,THREAD+THREAD_SPEFSCR(r2)
744END_FTR_SECTION_IFSET(CPU_FTR_SPE)
745#endif /* CONFIG_SPE */
746	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
747	beq+	1f
	/* Turn the facility bits off in the live MSR so the new task takes
	 * a facility-unavailable exception on first use (lazy save). */
748	andc	r11,r11,r0
749	mtmsr	r11
750	isync
7511:	stw	r11,_MSR(r1)
752	mfcr	r10
753	stw	r10,_CCR(r1)
754	stw	r1,KSP(r3)	/* Set old stack pointer */

756	kuap_check r2, r0
757#ifdef CONFIG_SMP
758	/* We need a sync somewhere here to make sure that if the
759	 * previous task gets rescheduled on another CPU, it sees all
760	 * stores it has performed on this one.
761	 */
762	sync
763#endif /* CONFIG_SMP */

765	tophys(r0,r4)
766	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
767	lwz	r1,KSP(r4)	/* Load new stack pointer */

769	/* save the old current 'last' for return value */
770	mr	r3,r2
771	addi	r2,r4,-THREAD	/* Update current */

773#ifdef CONFIG_ALTIVEC
774BEGIN_FTR_SECTION
775	lwz	r0,THREAD+THREAD_VRSAVE(r2)
776	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
777END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
778#endif /* CONFIG_ALTIVEC */
779#ifdef CONFIG_SPE
780BEGIN_FTR_SECTION
781	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
782	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
783END_FTR_SECTION_IFSET(CPU_FTR_SPE)
784#endif /* CONFIG_SPE */

786	lwz	r0,_CCR(r1)
787	mtcrf	0xFF,r0
788	/* r3-r12 are destroyed -- Cort */
789	REST_NVGPRS(r1)

791	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
792	mtlr	r4
793	addi	r1,r1,INT_FRAME_SIZE
794	blr
795
/*
 * Quick exception return that restores only the volatile state from the
 * frame at r11.  On entry: r9 = MSR to return with, r12 = NIP to return
 * to, r11 = exception frame.
 */
796	.globl	fast_exception_return
797fast_exception_return:
798#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
799	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
800	beq	1f			/* if not, we've got problems */
801#endif

8032:	REST_4GPRS(3, r11)
804	lwz	r10,_CCR(r11)
805	REST_GPR(1, r11)
806	mtcr	r10
807	lwz	r10,_LINK(r11)
808	mtlr	r10
809	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
810	li	r10, 0
811	stw	r10, 8(r11)
812	REST_GPR(10, r11)
813#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
814	mtspr	SPRN_NRI, r0
815#endif
816	mtspr	SPRN_SRR1,r9
817	mtspr	SPRN_SRR0,r12
818	REST_GPR(9, r11)
819	REST_GPR(12, r11)
	/* r11 is restored last since it is the frame pointer itself */
820	lwz	r11,GPR11(r11)
821	rfi
822#ifdef CONFIG_40x
823	b .	/* Prevent prefetch past rfi */
824#endif
825_ASM_NOKPROBE_SYMBOL(fast_exception_return)

827#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
828/* check if the exception happened in a restartable section */
8291:	lis	r3,exc_exit_restart_end@ha
830	addi	r3,r3,exc_exit_restart_end@l
831	cmplw	r12,r3
832	bge	3f
833	lis	r4,exc_exit_restart@ha
834	addi	r4,r4,exc_exit_restart@l
835	cmplw	r12,r4
836	blt	3f
	/* NIP was inside [exc_exit_restart, exc_exit_restart_end): count it
	 * and restart the exit sequence from the top */
837	lis	r3,fee_restarts@ha
838	tophys(r3,r3)
839	lwz	r5,fee_restarts@l(r3)
840	addi	r5,r5,1
841	stw	r5,fee_restarts@l(r3)
842	mr	r12,r4		/* restart at exc_exit_restart */
843	b	2b

845	.section .bss
846	.align	2
847fee_restarts:
848	.space	4
849	.previous

851/* aargh, a nonrecoverable interrupt, panic */
852/* aargh, we don't know which trap this is */
8533:
854	li	r10,-1
855	stw	r10,_TRAP(r11)
856	addi	r3,r1,STACK_FRAME_OVERHEAD
857	lis	r10,MSR_KERNEL@h
858	ori	r10,r10,MSR_KERNEL@l
	/* the two .long words after this call site are the handler and the
	 * return address picked up by transfer_to_handler via LR */
859	bl	transfer_to_handler_full
860	.long	unrecoverable_exception
861	.long	ret_from_except
862#endif
863
864	.globl	ret_from_except_full
865ret_from_except_full:
866	REST_NVGPRS(r1)
867	/* fall through */

869	.globl	ret_from_except
870ret_from_except:
871	/* Hard-disable interrupts so that current_thread_info()->flags
872	 * can't change between when we test it and when we return
873	 * from the interrupt. */
874	/* Note: We don't bother telling lockdep about it */
875	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
876	mtmsr	r10		/* disable interrupts */

878	lwz	r3,_MSR(r1)	/* Returning to user mode? */
879	andi.	r0,r3,MSR_PR
880	beq	resume_kernel

882user_exc_return:		/* r10 contains MSR_KERNEL here */
883	/* Check current_thread_info()->flags */
884	lwz	r9,TI_FLAGS(r2)
885	andi.	r0,r9,_TIF_USER_WORK_MASK
886	bne	do_work

888restore_user:
889#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
890	/* Check whether this process has its own DBCR0 value.  The internal
891	   debug mode bit tells us that dbcr0 should be loaded. */
892	lwz	r0,THREAD+THREAD_DBCR0(r2)
893	andis.	r10,r0,DBCR0_IDM@h
894	bnel-	load_dbcr0
895#endif
896	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
897#ifdef CONFIG_PPC_BOOK3S_32
898	kuep_unlock	r10, r11
899#endif

901	b	restore

903/* N.B. the only way to get here is from the beq following ret_from_except. */
904resume_kernel:
905	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
906	lwz	r8,TI_FLAGS(r2)
907	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
908	beq+	1f

910	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

912	lwz	r3,GPR1(r1)
913	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
914	mr	r4,r1			/* src:  current exception frame */
915	mr	r1,r3			/* Reroute the trampoline frame to r1 */

917	/* Copy from the original to the trampoline. */
918	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
919	li	r6,0			/* start offset: 0 */
920	mtctr	r5
9212:	lwzx	r0,r6,r4
922	stwx	r0,r6,r3
923	addi	r6,r6,4
924	bdnz	2b

926	/* Do real store operation to complete stwu */
927	lwz	r5,GPR1(r1)
928	stw	r8,0(r5)

930	/* Clear _TIF_EMULATE_STACK_STORE flag */
931	lis	r11,_TIF_EMULATE_STACK_STORE@h
932	addi	r5,r2,TI_FLAGS
	/* atomic read-modify-write: retry if the reservation is lost */
9330:	lwarx	r8,0,r5
934	andc	r8,r8,r11
935	stwcx.	r8,0,r5
936	bne-	0b
9371:

939#ifdef CONFIG_PREEMPTION
940	/* check current_thread_info->preempt_count */
941	lwz	r0,TI_PREEMPT(r2)
942	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
943	bne	restore_kuap
	/* r8 still holds TI_FLAGS from the lwarx loop / initial load */
944	andi.	r8,r8,_TIF_NEED_RESCHED
945	beq+	restore_kuap
946	lwz	r3,_MSR(r1)
947	andi.	r0,r3,MSR_EE	/* interrupts off? */
948	beq	restore_kuap	/* don't schedule if so */
949#ifdef CONFIG_TRACE_IRQFLAGS
950	/* Lockdep thinks irqs are enabled, we need to call
951	 * preempt_schedule_irq with IRQs off, so we inform lockdep
952	 * now that we -did- turn them off already
953	 */
954	bl	trace_hardirqs_off
955#endif
956	bl	preempt_schedule_irq
957#ifdef CONFIG_TRACE_IRQFLAGS
958	/* And now, to properly rebalance the above, we tell lockdep they
959	 * are being turned back on, which will happen when we return
960	 */
961	bl	trace_hardirqs_on
962#endif
963#endif /* CONFIG_PREEMPTION */
964restore_kuap:
965	kuap_restore r1, r2, r9, r10, r0

967	/* interrupts are hard-disabled at this point */
968restore:
969#ifdef CONFIG_44x
970BEGIN_MMU_FTR_SECTION
971	b	1f
972END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
973	lis	r4,icache_44x_need_flush@ha
974	lwz	r5,icache_44x_need_flush@l(r4)
975	cmplwi	cr0,r5,0
976	beq+	1f
977	li	r6,0
978	iccci	r0,r0
979	stw	r6,icache_44x_need_flush@l(r4)
9801:
981#endif  /* CONFIG_44x */

983	lwz	r9,_MSR(r1)
984#ifdef CONFIG_TRACE_IRQFLAGS
985	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
986	 * off in this assembly code while peeking at TI_FLAGS() and such. However
987	 * we need to inform it if the exception turned interrupts off, and we
988	 * are about to turn them back on.
989	 */
990	andi.	r10,r9,MSR_EE
991	beq	1f
992	stwu	r1,-32(r1)
993	mflr	r0
994	stw	r0,4(r1)
995	bl	trace_hardirqs_on
996	addi	r1, r1, 32
997	lwz	r9,_MSR(r1)
9981:
999#endif /* CONFIG_TRACE_IRQFLAGS */

1001	lwz	r0,GPR0(r1)
1002	lwz	r2,GPR2(r1)
1003	REST_4GPRS(3, r1)
1004	REST_2GPRS(7, r1)

1006	lwz	r10,_XER(r1)
1007	lwz	r11,_CTR(r1)
1008	mtspr	SPRN_XER,r10
1009	mtctr	r11

1011BEGIN_FTR_SECTION
1012	lwarx	r11,0,r1
1013END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
1014	stwcx.	r0,0,r1			/* to clear the reservation */

1016#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
1017	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
1018	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

1020	lwz	r10,_CCR(r1)
1021	lwz	r11,_LINK(r1)
1022	mtcrf	0xFF,r10
1023	mtlr	r11

1025	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1026	li	r10, 0
1027	stw	r10, 8(r1)
1028	/*
1029	 * Once we put values in SRR0 and SRR1, we are in a state
1030	 * where exceptions are not recoverable, since taking an
1031	 * exception will trash SRR0 and SRR1.  Therefore we clear the
1032	 * MSR:RI bit to indicate this.  If we do take an exception,
1033	 * we can't return to the point of the exception but we
1034	 * can restart the exception exit path at the label
1035	 * exc_exit_restart below.  -- paulus
1036	 */
1037	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
1038	mtmsr	r10		/* clear the RI bit */
1039	.globl exc_exit_restart
1040exc_exit_restart:
1041	lwz	r12,_NIP(r1)
1042	mtspr	SPRN_SRR0,r12
1043	mtspr	SPRN_SRR1,r9
1044	REST_4GPRS(9, r1)
1045	lwz	r1,GPR1(r1)
1046	.globl exc_exit_restart_end
1047exc_exit_restart_end:
1048	rfi
1049_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
1050_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)

1052#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1053	/*
1054	 * This is a bit different on 4xx/Book-E because it doesn't have
1055	 * the RI bit in the MSR.
1056	 * The TLB miss handler checks if we have interrupted
1057	 * the exception exit path and restarts it if so
1058	 * (well maybe one day it will... :).
1059	 */
1060	lwz	r11,_LINK(r1)
1061	mtlr	r11
1062	lwz	r10,_CCR(r1)
1063	mtcrf	0xff,r10
1064	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1065	li	r10, 0
1066	stw	r10, 8(r1)
1067	REST_2GPRS(9, r1)
1068	.globl exc_exit_restart
1069exc_exit_restart:
1070	lwz	r11,_NIP(r1)
1071	lwz	r12,_MSR(r1)
1072	mtspr	SPRN_SRR0,r11
1073	mtspr	SPRN_SRR1,r12
1074	REST_2GPRS(11, r1)
1075	lwz	r1,GPR1(r1)
1076	.globl exc_exit_restart_end
1077exc_exit_restart_end:
1078	rfi
1079	b	.			/* prevent prefetch past rfi */
1080_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
1081
1082/*
1083 * Returning from a critical interrupt in user mode doesn't need
1084 * to be any different from a normal exception.  For a critical
1085 * interrupt in the kernel, we just return (without checking for
1086 * preemption) since the interrupt may have happened at some crucial
1087 * place (e.g. inside the TLB miss handler), and because we will be
1088 * running with r1 pointing into critical_stack, not the current
1089 * process's kernel stack (and therefore current_thread_info() will
1090 * give the wrong answer).
1091 * We have to restore various SPRs that may have been in use at the
1092 * time of the critical interrupt.
1093 *
1094 */
1095#ifdef CONFIG_40x
1096#define PPC_40x_TURN_OFF_MSR_DR						    \
1097	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1098	 * assume the instructions here are mapped by a pinned TLB entry */ \
1099	li	r10,MSR_IR;						    \
1100	mtmsr	r10;							    \
1101	isync;								    \
1102	tophys(r1, r1);
1103#else
1104#define PPC_40x_TURN_OFF_MSR_DR
1105#endif
1106
/*
 * RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)
 *
 * Common tail for returning from an exception level that has its own
 * SRR pair and rfi flavour (critical/debug/machine check).  If the
 * saved MSR has MSR_PR set (exception hit user mode), branch to the
 * normal user_exc_return path with r10 = MSR_KERNEL; otherwise restore
 * NVGPRs, XER, CTR, LR, CR, DEAR and ESR, load the saved NIP/MSR into
 * the level's SRR pair, restore the remaining GPRs and return with the
 * matching rfi variant.  The stwcx. to the stack clears any dangling
 * lwarx reservation.  No exception may be taken between the mtspr of
 * the SRR pair and the final rfi.
 */
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */
1142
/*
 * RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) - reload a lower exception
 * level's SRR pair (e.g. SRR0/SRR1 or CSRR0/CSRR1) into its SPRs from
 * the values stashed in the exception frame, since taking this
 * higher-level exception may have clobbered the live registers.
 * Clobbers r9, r10.
 */
#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
1148
#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
/* MAS7 (high physical address bits) only exists with CONFIG_PHYS_64BIT */
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
/*
 * Reload the MMU assist registers (MAS0-MAS3, MAS6, plus MAS7 when
 * present) from the exception frame — presumably because an interrupted
 * TLB miss handler may have been using them (see comment further up).
 * Clobbers r9, r10, r11.
 */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
/* 44x keeps its TLB context in MMUCR instead of the MAS registers */
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif
1176
#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	/*
	 * Return from a 40x critical exception: restore the saved kernel
	 * stack limit into the thread struct, then recover SRR0/SRR1 from
	 * the crit_srr0/crit_srr1 globals (assumed saved there by the
	 * critical exception prolog — entry code not visible here) before
	 * taking the common exit via rfci from CSRR0/CSRR1.
	 */
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)		/* thread->ksp_limit = saved value */
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */
1194
#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	/*
	 * Return from a BookE critical exception: restore the saved kernel
	 * stack limit, the (possibly clobbered) SRR0/SRR1 pair and the MMU
	 * assist registers, then exit via rfci from CSRR0/CSRR1.
	 */
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)		/* thread->ksp_limit = saved value */
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
1205
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	/*
	 * Return from a BookE debug exception.  Debug sits above critical,
	 * so both the SRR and CSRR pairs may have been clobbered and must
	 * be restored along with the MMU registers before exiting via rfdi
	 * from DSRR0/DSRR1.
	 */
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)		/* thread->ksp_limit = saved value */
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
1216
	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	/*
	 * Return from a BookE machine check, the highest exception level
	 * here: restore the SRR, CSRR and DSRR pairs (any of which may
	 * have been clobbered) plus the MMU registers, then exit via
	 * rfmci from MCSRR0/MCSRR1.
	 */
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)		/* thread->ksp_limit = saved value */
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
1229
1230/*
1231 * Load the DBCR0 value for a task that is being ptraced,
1232 * having first saved away the global DBCR0.  Note that r0
1233 * has the dbcr0 value to set upon entry to this.
1234 */
1235load_dbcr0:
1236	mfmsr	r10		/* first disable debug exceptions */
1237	rlwinm	r10,r10,0,~MSR_DE
1238	mtmsr	r10
1239	isync
1240	mfspr	r10,SPRN_DBCR0
1241	lis	r11,global_dbcr0@ha
1242	addi	r11,r11,global_dbcr0@l
1243#ifdef CONFIG_SMP
1244	lwz	r9,TASK_CPU(r2)
1245	slwi	r9,r9,3
1246	add	r11,r11,r9
1247#endif
1248	stw	r10,0(r11)
1249	mtspr	SPRN_DBCR0,r0
1250	lwz	r10,4(r11)
1251	addi	r10,r10,1
1252	stw	r10,4(r11)
1253	li	r11,-1
1254	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1255	blr
1256
1257	.section .bss
1258	.align	4
1259	.global global_dbcr0
1260global_dbcr0:
1261	.space	8*NR_CPUS
1262	.previous
1263#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1264
/*
 * Work loop run before returning to user mode.  Entered with interrupts
 * disabled, r9 = current TI_FLAGS, r10 = MSR_KERNEL.  Reschedules and/or
 * delivers signals until no work bits remain, then falls back to
 * restore_user.
 */
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
	mfmsr	r10		/* trace call may have changed MSR; reload */
#endif
	ori	r10,r10,MSR_EE
	mtmsr	r10		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: we also don't tell the irq tracing code that we are
	 * disabling interrupts again here.  These transient
	 * disable/enable cycles used to peek at TI_FLAGS aren't
	 * advertised.
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	mtmsr	r10		/* disable interrupts */
	lwz	r9,TI_FLAGS(r2)	/* re-read flags: they may have changed */
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	mtmsr	r10		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1		/* low bit set => NVGPRs not yet saved */
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30	/* clear the 'partial regs' low bit */
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9		/* r4 = TI_FLAGS for do_notify_resume */
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck
1304
1305/*
1306 * We come here when we are at the end of handling an exception
1307 * that occurred at a place where taking an exception will lose
1308 * state information, such as the contents of SRR0 and SRR1.
1309 */
1310nonrecoverable:
1311	lis	r10,exc_exit_restart_end@ha
1312	addi	r10,r10,exc_exit_restart_end@l
1313	cmplw	r12,r10
1314	bge	3f
1315	lis	r11,exc_exit_restart@ha
1316	addi	r11,r11,exc_exit_restart@l
1317	cmplw	r12,r11
1318	blt	3f
1319	lis	r10,ee_restarts@ha
1320	lwz	r12,ee_restarts@l(r10)
1321	addi	r12,r12,1
1322	stw	r12,ee_restarts@l(r10)
1323	mr	r12,r11		/* restart at exc_exit_restart */
1324	blr
13253:	/* OK, we can't recover, kill this process */
1326	lwz	r3,_TRAP(r1)
1327	andi.	r0,r3,1
1328	beq	5f
1329	SAVE_NVGPRS(r1)
1330	rlwinm	r3,r3,0,0,30
1331	stw	r3,_TRAP(r1)
13325:	mfspr	r2,SPRN_SPRG_THREAD
1333	addi	r2,r2,-THREAD
1334	tovirt(r2,r2)			/* set back r2 to current */
13354:	addi	r3,r1,STACK_FRAME_OVERHEAD
1336	bl	unrecoverable_exception
1337	/* shouldn't return */
1338	b	4b
1339_ASM_NOKPROBE_SYMBOL(nonrecoverable)
1340
1341	.section .bss
1342	.align	2
1343ee_restarts:
1344	.space	4
1345	.previous
1346
1347/*
1348 * PROM code for specific machines follows.  Put it
1349 * here so it's easy to add arch-specific sections later.
1350 * -- Cort
1351 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * r3 is left untouched and thus passed straight through to the RTAS
 * entry point (presumably the physical address of the RTAS argument
 * buffer — caller not visible here).  Interrupts are disabled across
 * the call and the pre-call MSR is restored on the way out.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)	/* save LR in the caller's frame */
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys_novmstack r7, r1
	lwz	r8,RTASENTRY(r4)	/* r8 = rtas.entry */
	lwz	r4,RTASBASE(r4)		/* r4 = rtas.base */
	mfmsr	r9
	stw	r9,8(r1)	/* stash original MSR for the return path */
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	mtmsr	r0	/* disable interrupts so SRR0/1 don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)	/* RTAS runs MMU-off */
	mtlr	r6
	stw	r7, THREAD + RTAS_SP(r2)	/* record phys SP — NOTE(review): presumably marks "in RTAS"; confirm users */
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi				/* enter RTAS with the MMU off */
1:	tophys_novmstack r9, r1		/* back from RTAS; MMU still off */
#ifdef CONFIG_VMAP_STACK
	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
	mtmsr	r0
	isync
#endif
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	tophys_novmstack r7, r2
	stw	r0, THREAD + RTAS_SP(r7)	/* clear the RTAS stack marker */
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi			/* return to caller */
_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */
1395