/* xref: /linux/arch/powerpc/kernel/exceptions-64s.S (revision 957e3facd147510f2cf8780e38606f1d707f0e33) */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x17ff : pSeries Interrupt prologs
 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 - 0x8fff : Initial (CPU0) segment table
 * 0x9000 -        : Early init and support code
 */
30	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
31#define SYSCALL_PSERIES_1 					\
32BEGIN_FTR_SECTION						\
33	cmpdi	r0,0x1ebe ; 					\
34	beq-	1f ;						\
35END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
36	mr	r9,r13 ;					\
37	GET_PACA(r13) ;						\
38	mfspr	r11,SPRN_SRR0 ;					\
390:
40
/*
 * Tail of the real-mode syscall prolog: capture SRR1, point SRR0 at
 * system_call_entry and enter it via rfid with the kernel MSR.
 */
#define SYSCALL_PSERIES_2_RFID 					\
	mfspr	r12,SPRN_SRR1 ;					\
	ld	r10,PACAKBASE(r13) ; 				\
	LOAD_HANDLER(r10, system_call_entry) ; 			\
	mtspr	SPRN_SRR0,r10 ; 				\
	ld	r10,PACAKMSR(r13) ;				\
	mtspr	SPRN_SRR1,r10 ; 				\
	rfid ; 							\
	b	. ;	/* prevent speculative execution */

/*
 * Fast LE/BE switch system call (reached via beq- 1f in
 * SYSCALL_PSERIES_1): flip MSR_LE in SRR1 and return to userspace.
 */
#define SYSCALL_PSERIES_3					\
	/* Fast LE/BE switch system call */			\
1:	mfspr	r12,SPRN_SRR1 ;					\
	xori	r12,r12,MSR_LE ;				\
	mtspr	SPRN_SRR1,r12 ;					\
	rfid ;		/* return to userspace */		\
	b	. ;	/* prevent speculative execution */

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly; in the direct case we use LR
	 * and system_call_entry restores LR.  (We thus need to move
	 * LR to r10 in the RFID case too.)
	 */
#define SYSCALL_PSERIES_2_DIRECT				\
	mflr	r10 ;						\
	ld	r12,PACAKBASE(r13) ; 				\
	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
	mtctr	r12 ;						\
	mfspr	r12,SPRN_SRR1 ;					\
	/* Re-use of r13... No spare regs to do this */	\
	li	r13,MSR_RI ;					\
	mtmsrd 	r13,1 ;						\
	GET_PACA(r13) ;	/* get r13 back */			\
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT				\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
	b	system_call_entry_direct ;
#endif

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

97	.globl system_reset_pSeries;
98system_reset_pSeries:
99	HMT_MEDIUM_PPR_DISCARD
100	SET_SCRATCH0(r13)
101#ifdef CONFIG_PPC_P7_NAP
102BEGIN_FTR_SECTION
103	/* Running native on arch 2.06 or later, check if we are
104	 * waking up from nap. We only handle no state loss and
105	 * supervisor state loss. We do -not- handle hypervisor
106	 * state loss at this time.
107	 */
108	mfspr	r13,SPRN_SRR1
109	rlwinm.	r13,r13,47-31,30,31
110	beq	9f
111
112	/* waking up from powersave (nap) state */
113	cmpwi	cr1,r13,2
114	/* Total loss of HV state is fatal, we could try to use the
115	 * PIR to locate a PACA, then use an emergency stack etc...
116	 * OPAL v3 based powernv platforms have new idle states
117	 * which fall in this catagory.
118	 */
119	bgt	cr1,8f
120	GET_PACA(r13)
121
122#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
123	li	r0,KVM_HWTHREAD_IN_KERNEL
124	stb	r0,HSTATE_HWTHREAD_STATE(r13)
125	/* Order setting hwthread_state vs. testing hwthread_req */
126	sync
127	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
128	cmpwi	r0,0
129	beq	1f
130	b	kvm_start_guest
1311:
132#endif
133
134	/* Return SRR1 from power7_nap() */
135	mfspr	r3,SPRN_SRR1
136	beq	cr1,2f
137	b	power7_wakeup_noloss
1382:	b	power7_wakeup_loss
139
140	/* Fast Sleep wakeup on PowerNV */
1418:	GET_PACA(r13)
142	b 	power7_wakeup_tb_loss
143
1449:
145END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
146#endif /* CONFIG_PPC_P7_NAP */
147	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148				 NOTEST, 0x100)
149
150	. = 0x200
151machine_check_pSeries_1:
152	/* This is moved out of line as it can be patched by FW, but
153	 * some code path might still want to branch into the original
154	 * vector
155	 */
156	HMT_MEDIUM_PPR_DISCARD
157	SET_SCRATCH0(r13)		/* save r13 */
158#ifdef CONFIG_PPC_P7_NAP
159BEGIN_FTR_SECTION
160	/* Running native on arch 2.06 or later, check if we are
161	 * waking up from nap. We only handle no state loss and
162	 * supervisor state loss. We do -not- handle hypervisor
163	 * state loss at this time.
164	 */
165	mfspr	r13,SPRN_SRR1
166	rlwinm.	r13,r13,47-31,30,31
167	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
168	beq	9f
169
170	mfspr	r13,SPRN_SRR1
171	rlwinm.	r13,r13,47-31,30,31
172	/* waking up from powersave (nap) state */
173	cmpwi	cr1,r13,2
174	/* Total loss of HV state is fatal. let's just stay stuck here */
175	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
176	bgt	cr1,.
1779:
178	OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
179END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
180#endif /* CONFIG_PPC_P7_NAP */
181	EXCEPTION_PROLOG_0(PACA_EXMC)
182BEGIN_FTR_SECTION
183	b	machine_check_pSeries_early
184FTR_SECTION_ELSE
185	b	machine_check_pSeries_0
186ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
187
188	. = 0x300
189	.globl data_access_pSeries
190data_access_pSeries:
191	HMT_MEDIUM_PPR_DISCARD
192	SET_SCRATCH0(r13)
193	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
194				 KVMTEST, 0x300)
195
196	. = 0x380
197	.globl data_access_slb_pSeries
198data_access_slb_pSeries:
199	HMT_MEDIUM_PPR_DISCARD
200	SET_SCRATCH0(r13)
201	EXCEPTION_PROLOG_0(PACA_EXSLB)
202	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
203	std	r3,PACA_EXSLB+EX_R3(r13)
204	mfspr	r3,SPRN_DAR
205#ifdef __DISABLED__
206	/* Keep that around for when we re-implement dynamic VSIDs */
207	cmpdi	r3,0
208	bge	slb_miss_user_pseries
209#endif /* __DISABLED__ */
210	mfspr	r12,SPRN_SRR1
211#ifndef CONFIG_RELOCATABLE
212	b	slb_miss_realmode
213#else
214	/*
215	 * We can't just use a direct branch to slb_miss_realmode
216	 * because the distance from here to there depends on where
217	 * the kernel ends up being put.
218	 */
219	mfctr	r11
220	ld	r10,PACAKBASE(r13)
221	LOAD_HANDLER(r10, slb_miss_realmode)
222	mtctr	r10
223	bctr
224#endif
225
226	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
227
228	. = 0x480
229	.globl instruction_access_slb_pSeries
230instruction_access_slb_pSeries:
231	HMT_MEDIUM_PPR_DISCARD
232	SET_SCRATCH0(r13)
233	EXCEPTION_PROLOG_0(PACA_EXSLB)
234	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
235	std	r3,PACA_EXSLB+EX_R3(r13)
236	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
237#ifdef __DISABLED__
238	/* Keep that around for when we re-implement dynamic VSIDs */
239	cmpdi	r3,0
240	bge	slb_miss_user_pseries
241#endif /* __DISABLED__ */
242	mfspr	r12,SPRN_SRR1
243#ifndef CONFIG_RELOCATABLE
244	b	slb_miss_realmode
245#else
246	mfctr	r11
247	ld	r10,PACAKBASE(r13)
248	LOAD_HANDLER(r10, slb_miss_realmode)
249	mtctr	r10
250	bctr
251#endif
252
253	/* We open code these as we can't have a ". = x" (even with
254	 * x = "." within a feature section
255	 */
256	. = 0x500;
257	.globl hardware_interrupt_pSeries;
258	.globl hardware_interrupt_hv;
259hardware_interrupt_pSeries:
260hardware_interrupt_hv:
261	HMT_MEDIUM_PPR_DISCARD
262	BEGIN_FTR_SECTION
263		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
264					    EXC_HV, SOFTEN_TEST_HV)
265		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
266	FTR_SECTION_ELSE
267		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
268					    EXC_STD, SOFTEN_TEST_HV_201)
269		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
270	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
271
272	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
273	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
274
275	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
276	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
277
278	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
279	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
280
281	. = 0x900
282	.globl decrementer_pSeries
283decrementer_pSeries:
284	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
285
286	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
287
288	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
289	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
290
291	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
292	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
293
294	. = 0xc00
295	.globl	system_call_pSeries
296system_call_pSeries:
297	 /*
298	  * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
299	  * that support it) before changing to HMT_MEDIUM. That allows the KVM
300	  * code to save that value into the guest state (it is the guest's PPR
301	  * value). Otherwise just change to HMT_MEDIUM as userspace has
302	  * already saved the PPR.
303	  */
304#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
305	SET_SCRATCH0(r13)
306	GET_PACA(r13)
307	std	r9,PACA_EXGEN+EX_R9(r13)
308	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
309	HMT_MEDIUM;
310	std	r10,PACA_EXGEN+EX_R10(r13)
311	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
312	mfcr	r9
313	KVMTEST(0xc00)
314	GET_SCRATCH0(r13)
315#else
316	HMT_MEDIUM;
317#endif
318	SYSCALL_PSERIES_1
319	SYSCALL_PSERIES_2_RFID
320	SYSCALL_PSERIES_3
321	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
322
323	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
324	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
325
326	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
327	 * out of line to handle them
328	 */
329	. = 0xe00
330hv_data_storage_trampoline:
331	SET_SCRATCH0(r13)
332	EXCEPTION_PROLOG_0(PACA_EXGEN)
333	b	h_data_storage_hv
334
335	. = 0xe20
336hv_instr_storage_trampoline:
337	SET_SCRATCH0(r13)
338	EXCEPTION_PROLOG_0(PACA_EXGEN)
339	b	h_instr_storage_hv
340
341	. = 0xe40
342emulation_assist_trampoline:
343	SET_SCRATCH0(r13)
344	EXCEPTION_PROLOG_0(PACA_EXGEN)
345	b	emulation_assist_hv
346
347	. = 0xe60
348hv_exception_trampoline:
349	SET_SCRATCH0(r13)
350	EXCEPTION_PROLOG_0(PACA_EXGEN)
351	b	hmi_exception_early
352
353	. = 0xe80
354hv_doorbell_trampoline:
355	SET_SCRATCH0(r13)
356	EXCEPTION_PROLOG_0(PACA_EXGEN)
357	b	h_doorbell_hv
358
359	/* We need to deal with the Altivec unavailable exception
360	 * here which is at 0xf20, thus in the middle of the
361	 * prolog code of the PerformanceMonitor one. A little
362	 * trickery is thus necessary
363	 */
364	. = 0xf00
365performance_monitor_pseries_trampoline:
366	SET_SCRATCH0(r13)
367	EXCEPTION_PROLOG_0(PACA_EXGEN)
368	b	performance_monitor_pSeries
369
370	. = 0xf20
371altivec_unavailable_pseries_trampoline:
372	SET_SCRATCH0(r13)
373	EXCEPTION_PROLOG_0(PACA_EXGEN)
374	b	altivec_unavailable_pSeries
375
376	. = 0xf40
377vsx_unavailable_pseries_trampoline:
378	SET_SCRATCH0(r13)
379	EXCEPTION_PROLOG_0(PACA_EXGEN)
380	b	vsx_unavailable_pSeries
381
382	. = 0xf60
383facility_unavailable_trampoline:
384	SET_SCRATCH0(r13)
385	EXCEPTION_PROLOG_0(PACA_EXGEN)
386	b	facility_unavailable_pSeries
387
388	. = 0xf80
389hv_facility_unavailable_trampoline:
390	SET_SCRATCH0(r13)
391	EXCEPTION_PROLOG_0(PACA_EXGEN)
392	b	facility_unavailable_hv
393
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

	. = 0x1500
	.global denorm_exception_hv
denorm_exception_hv:
	HMT_MEDIUM_PPR_DISCARD
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	KVMTEST(0x1500)
	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#else
	. = 0x1800
#endif /* CONFIG_CBE_RAS */


/*** Out of line interrupts support ***/

	.align	7
	/* moved from 0x200 */
machine_check_pSeries_early:
BEGIN_FTR_SECTION
	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
	/*
	 * Register contents:
	 * R13		= PACA
	 * R9		= CR
	 * Original R9 to R13 is saved on PACA_EXMC
	 *
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 */
	mr	r11,r1			/* Save r1 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	bne	0f			/* Yes, we are. */
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)
	/* Limit nested MCE to level 4 to avoid stack overflow */
	cmpwi	r10,4
	bgt	2f			/* Check if we hit limit of 4 */
	std	r11,GPR1(r1)		/* Save r1 on the stack. */
	std	r11,0(r1)		/* make stack chain pointer */
	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
	std	r11,_NIP(r1)
	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
	std	r11,_MSR(r1)
	mfspr	r11,SPRN_DAR		/* Save DAR */
	std	r11,_DAR(r1)
	mfspr	r11,SPRN_DSISR		/* Save DSISR */
	std	r11,_DSISR(r1)
	std	r9,_CCR(r1)		/* Save CR in stackframe */
	/* Save r9 through r13 from EXMC save area to stack frame. */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
	mfmsr	r11			/* get MSR value */
	ori	r11,r11,MSR_ME		/* turn on ME bit */
	ori	r11,r11,MSR_RI		/* turn on RI bit */
	ld	r12,PACAKBASE(r13)	/* get high part of &label */
	LOAD_HANDLER(r12, machine_check_handle_early)
1:	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
	rfid
	b	.	/* prevent speculative execution */
2:
	/* Stack overflow. Stay on emergency stack and panic.
	 * Keep the ME bit off while panic-ing, so that if we hit
	 * another machine check we checkstop.
	 */
	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
	ld	r11,PACAKMSR(r13)
	ld	r12,PACAKBASE(r13)
	LOAD_HANDLER(r12, unrecover_mce)
	li	r10,MSR_ME
	andc	r11,r11,r10		/* Turn off MSR_ME */
	b	1b
	b	.	/* prevent speculative execution */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

#ifdef CONFIG_PPC_DENORMALISATION
denorm_assist:
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	XVCPSGNDP32(32)
denorm_done:
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif

597	.align	7
598	/* moved from 0xe00 */
599	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
600	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
601	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
602	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
603	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
604	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
605	MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception)
606	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
607
608	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
609	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
610
611	/* moved from 0xf00 */
612	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
613	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
614	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
615	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
616	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
617	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
618	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
619	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
620	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
621	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
622
/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - else we hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	cmpwi	r10,PACA_IRQ_DEC;			\
	bne	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	cmpwi	r10,PACA_IRQ_DBELL;			\
	beq	2f;					\
	cmpwi	r10,PACA_IRQ_HMI;			\
	beq	2f;					\
	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.

	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 * which kind of interrupt. MSR:EE is already off. We generate a
 * stackframe like if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
	beq	hardware_interrupt_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xe80
	beq	h_doorbell_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	blr

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align	7
system_reset_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvmppc_skip_interrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	rfid
	b	.

kvmppc_skip_Hinterrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	hrfid
	b	.
#endif

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an ori instruction, these handlers must be in
 * the first 64k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
	STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
#endif
	STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
	STD_EXCEPTION_COMMON_ASYNC(0xe60, hmi_exception, handle_hmi_exception)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
#endif
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
	STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

808	/*
809	 * Relocation-on interrupts: A subset of the interrupts can be delivered
810	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
811	 * it.  Addresses are the same as the original interrupt addresses, but
812	 * offset by 0xc000000000004000.
813	 * It's impossible to receive interrupts below 0x300 via this mechanism.
814	 * KVM: None of these traps are from the guest ; anything that escalated
815	 * to HV=1 from HV=0 is delivered via real mode handlers.
816	 */
817
818	/*
819	 * This uses the standard macro, since the original 0x300 vector
820	 * only has extra guff for STAB-based processors -- which never
821	 * come here.
822	 */
823	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
824	. = 0x4380
825	.globl data_access_slb_relon_pSeries
826data_access_slb_relon_pSeries:
827	SET_SCRATCH0(r13)
828	EXCEPTION_PROLOG_0(PACA_EXSLB)
829	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
830	std	r3,PACA_EXSLB+EX_R3(r13)
831	mfspr	r3,SPRN_DAR
832	mfspr	r12,SPRN_SRR1
833#ifndef CONFIG_RELOCATABLE
834	b	slb_miss_realmode
835#else
836	/*
837	 * We can't just use a direct branch to slb_miss_realmode
838	 * because the distance from here to there depends on where
839	 * the kernel ends up being put.
840	 */
841	mfctr	r11
842	ld	r10,PACAKBASE(r13)
843	LOAD_HANDLER(r10, slb_miss_realmode)
844	mtctr	r10
845	bctr
846#endif
847
848	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
849	. = 0x4480
850	.globl instruction_access_slb_relon_pSeries
851instruction_access_slb_relon_pSeries:
852	SET_SCRATCH0(r13)
853	EXCEPTION_PROLOG_0(PACA_EXSLB)
854	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
855	std	r3,PACA_EXSLB+EX_R3(r13)
856	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
857	mfspr	r12,SPRN_SRR1
858#ifndef CONFIG_RELOCATABLE
859	b	slb_miss_realmode
860#else
861	mfctr	r11
862	ld	r10,PACAKBASE(r13)
863	LOAD_HANDLER(r10, slb_miss_realmode)
864	mtctr	r10
865	bctr
866#endif
867
868	. = 0x4500
869	.globl hardware_interrupt_relon_pSeries;
870	.globl hardware_interrupt_relon_hv;
871hardware_interrupt_relon_pSeries:
872hardware_interrupt_relon_hv:
873	BEGIN_FTR_SECTION
874		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
875	FTR_SECTION_ELSE
876		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
877	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
878	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
879	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
880	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
881	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
882	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
883	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
884	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
885
886	. = 0x4c00
887	.globl system_call_relon_pSeries
888system_call_relon_pSeries:
889	HMT_MEDIUM
890	SYSCALL_PSERIES_1
891	SYSCALL_PSERIES_2_DIRECT
892	SYSCALL_PSERIES_3
893
894	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
895
896	. = 0x4e00
897	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
898
899	. = 0x4e20
900	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
901
902	. = 0x4e40
903emulation_assist_relon_trampoline:
904	SET_SCRATCH0(r13)
905	EXCEPTION_PROLOG_0(PACA_EXGEN)
906	b	emulation_assist_relon_hv
907
908	. = 0x4e60
909	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
910
911	. = 0x4e80
912h_doorbell_relon_trampoline:
913	SET_SCRATCH0(r13)
914	EXCEPTION_PROLOG_0(PACA_EXGEN)
915	b	h_doorbell_relon_hv
916
917	. = 0x4f00
918performance_monitor_relon_pseries_trampoline:
919	SET_SCRATCH0(r13)
920	EXCEPTION_PROLOG_0(PACA_EXGEN)
921	b	performance_monitor_relon_pSeries
922
923	. = 0x4f20
924altivec_unavailable_relon_pseries_trampoline:
925	SET_SCRATCH0(r13)
926	EXCEPTION_PROLOG_0(PACA_EXGEN)
927	b	altivec_unavailable_relon_pSeries
928
929	. = 0x4f40
930vsx_unavailable_relon_pseries_trampoline:
931	SET_SCRATCH0(r13)
932	EXCEPTION_PROLOG_0(PACA_EXGEN)
933	b	vsx_unavailable_relon_pSeries
934
935	. = 0x4f60
936facility_unavailable_relon_trampoline:
937	SET_SCRATCH0(r13)
938	EXCEPTION_PROLOG_0(PACA_EXGEN)
939	b	facility_unavailable_relon_pSeries
940
941	. = 0x4f80
942hv_facility_unavailable_relon_trampoline:
943	SET_SCRATCH0(r13)
944	EXCEPTION_PROLOG_0(PACA_EXGEN)
945	b	hv_facility_unavailable_relon_hv
946
947	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
948#ifdef CONFIG_PPC_DENORMALISATION
949	. = 0x5500
950	b	denorm_exception_hv
951#endif
952	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
953
954	/* Other future vectors */
955	.align	7
956	.globl	__end_interrupts
957__end_interrupts:
958
959	.align	7
960system_call_entry_direct:
961#if defined(CONFIG_RELOCATABLE)
962	/* The first level prologue may have used LR to get here, saving
963	 * orig in r10.  To save hacking/ifdeffing common code, restore here.
964	 */
965	mtlr	r10
966#endif
967system_call_entry:
968	b	system_call_common
969
970ppc64_runlatch_on_trampoline:
971	b	__ppc64_runlatch_on
972
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	do_hash_page		/* Try to handle as hpte fault */

993	.align  7
994	.globl  h_data_storage_common
995h_data_storage_common:
996	mfspr   r10,SPRN_HDAR
997	std     r10,PACA_EXGEN+EX_DAR(r13)
998	mfspr   r10,SPRN_HDSISR
999	stw     r10,PACA_EXGEN+EX_DSISR(r13)
1000	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
1001	bl      save_nvgprs
1002	RECONCILE_IRQ_STATE(r10, r11)
1003	addi    r3,r1,STACK_FRAME_OVERHEAD
1004	bl      unknown_exception
1005	b       ret_from_except
1006
	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	/* For an ISI the faulting address is the NIP, and the fault
	 * reason bits live in SRR1 rather than DSISR; mask them into
	 * r4 as a DSISR-style value for do_hash_page.
	 */
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
1019
/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 *
 * NOTE(review): this whole block is compiled out (__DISABLED__) and has
 * bit-rotted: it restores r3 from EX_R3 without ever saving it here,
 * and uses "mtspr SRR0/SRR1" without the SPRN_ prefix.  Fix before
 * re-enabling.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	/* Restore the registers saved in paca->exgen and return */
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	/* SLB allocation failed: hand off as a page fault with DSISR = 0 */
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	/* MSR_RI was clear: we cannot safely return, so panic */
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	bl	save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */
1082
1083
1084	/*
1085	 * Machine check is different because we use a different
1086	 * save area: PACA_EXMC instead of PACA_EXGEN.
1087	 */
1088	.align	7
1089	.globl machine_check_common
1090machine_check_common:
1091
1092	mfspr	r10,SPRN_DAR
1093	std	r10,PACA_EXGEN+EX_DAR(r13)
1094	mfspr	r10,SPRN_DSISR
1095	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1096	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
1097	FINISH_NAP
1098	RECONCILE_IRQ_STATE(r10, r11)
1099	ld	r3,PACA_EXGEN+EX_DAR(r13)
1100	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1101	std	r3,_DAR(r1)
1102	std	r4,_DSISR(r1)
1103	bl	save_nvgprs
1104	addi	r3,r1,STACK_FRAME_OVERHEAD
1105	bl	machine_check_exception
1106	b	ret_from_except
1107
	.align	7
	.globl alignment_common
alignment_common:
	/* Save DAR/DSISR before the prologue reuses r10, then hand the
	 * faulting address and reason to alignment_exception() via pt_regs.
	 */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	b	ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	/* Program check (0x700): full register save then call C handler */
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	b	ret_from_except
1135
	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	/* FP unavailable (0x800).  From userspace we simply load the FP
	 * state; from the kernel this is a bug (the kernel must not use
	 * FP without enabling it explicitly).
	 */
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	ret_from_except
#endif
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	/* AltiVec unavailable (0xf20): load the vector state for user
	 * tasks on CPUs with AltiVec, otherwise report the exception.
	 */
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f			/* kernel-mode fault: report it */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except
1200
	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	/* VSX unavailable (0xf40): mirror of the AltiVec path above.
	 * Note load_up_vsx is reached with "b", not "bl": it returns to
	 * userspace itself rather than back here.
	 */
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f			/* kernel-mode fault: report it */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	ret_from_except

	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)

	.align	7
	.globl	__end_handlers
__end_handlers:
1241
	/* Equivalents to the above handlers for relocation-on interrupt vectors */
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)

	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	.= 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1266
	/*
	 * Hypervisor Maintenance Interrupt early handler.  Builds a frame
	 * on the emergency stack, calls hmi_exception_realmode(), then
	 * unwinds completely and falls through to re-enter via the normal
	 * (virtual mode) 0xe60 path to fetch the HMI event from firmware.
	 */
	.globl hmi_exception_early
hmi_exception_early:
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0xe60)
	mr	r10,r1			/* Save r1			*/
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
	std	r11,_NIP(r1)		/* save HSRR0 in stackframe	*/
	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
	std	r12,_MSR(r1)		/* save HSRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/
	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
	EXCEPTION_PROLOG_COMMON_3(0xe60)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	hmi_exception_realmode
	/* Windup the stack. */
	/* Move original HSRR0 and HSRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_HSRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_HSRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	mtcr	r11
	REST_GPR(11, r1)
	REST_2GPRS(12, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)

	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	.globl hmi_exception_after_realmode
hmi_exception_after_realmode:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_hv
1316
1317
/*
 * Unwind the machine check stack frame and restore all registers
 * (SRR0/SRR1, CTR, XER, LR, CR, GPRs) ready for rfid, decrementing
 * paca->in_mce on the way out.  MSR_RI is cleared first so a nested
 * exception between the SRR writes and rfid is flagged unrecoverable.
 */
#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r0,MSR_RI;				\
	mfmsr	r9;		/* get MSR value */	\
	andc	r9,r9,r0;				\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Move original SRR0 and SRR1 into the respective regs */	\
	ld	r9,_MSR(r1);				\
	mtspr	SPRN_SRR1,r9;				\
	ld	r3,_NIP(r1);				\
	mtspr	SPRN_SRR0,r3;				\
	ld	r9,_CTR(r1);				\
	mtctr	r9;					\
	ld	r9,_XER(r1);				\
	mtxer	r9;					\
	ld	r9,_LINK(r1);				\
	mtlr	r9;					\
	REST_GPR(0, r1);				\
	REST_8GPRS(2, r1);				\
	REST_GPR(10, r1);				\
	ld	r11,_CCR(r1);				\
	mtcr	r11;					\
	/* Decrement paca->in_mce. */			\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	REST_GPR(11, r1);				\
	REST_2GPRS(12, r1);				\
	/* restore original r1. */			\
	ld	r1,GPR1(r1)
1348
	/*
	 * Handle machine check early in real mode. We come here with
	 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
	 */
	.align	7
	.globl machine_check_handle_early
machine_check_handle_early:
	std	r0,GPR0(r1)	/* Save r0 */
	EXCEPTION_PROLOG_COMMON_3(0x200)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)
#ifdef	CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss or
	 *    supervisor state loss
	 *
	 * Go back to nap again if (b) is true.
	 */
	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */
	beq	4f			/* No, it wasn't */
	/* Thread was in power saving mode. Go back to nap again. */
	cmpwi	r11,2
	bne	3f
	/* Supervisor state loss */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)
3:	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	GET_PACA(r13)
	ld	r1,PACAR1(r13)
	b	power7_enter_nap_mode
4:
#endif
	/*
	 * Check if we are coming from hypervisor userspace. If yes then we
	 * continue in host kernel in V mode to deliver the MC event.
	 */
	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
	beq	5f
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	9f			/* continue in V mode if we are. */

5:
#ifdef CONFIG_KVM_BOOK3S_64_HV
	/*
	 * We are coming from kernel context. Check if we are coming from
	 * guest. if yes, then we can continue. We will fall through
	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	9f			/* continue if we are. */
#endif
	/*
	 * At this point we are not sure about what context we come from.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	bne	2f
1:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecover_mce)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
	li	r3,MSR_ME
	andc	r10,r10,r3		/* Turn off MSR_ME */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.
2:
	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */

	beq	1b		/* if !handled then panic */
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	rfid
9:
	/* Deliver the machine check to host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
	b	machine_check_pSeries

unrecover_mce:
	/* Invoke machine_check_exception to print MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	/*
	 * We will not reach here. Even if we did, there is no way out. Call
	 * unrecoverable_exception and die.
	 */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contain the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
slb_miss_realmode:
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	/* CTR was used by the relocatable first-level handler to get here;
	 * preserve the original r11 value it stashed there.
	 */
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	/* Restore the registers saved in paca->exslb and return */
	RESTORE_PPR_PACA(PACA_EXSLB, r9)
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	/* MSR_RI was clear: redirect the return to unrecov_slb */
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	RECONCILE_IRQ_STATE(r10, r11)
	bl	save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b
1527
1528
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	/* Clear the nap flag (r10 mask) in thread_info local flags and
	 * rewrite the saved NIP to the saved LR so the interrupted idle
	 * task effectively returns from its nap as if by blr.
	 */
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif
1537
/*
 * Hash table stuff
 *
 * Entered with r3 = faulting address, r4 = DSISR (or DSISR-style bits
 * from SRR1 for an ISI), r5 = trap number, r12 = interrupted MSR.
 */
	.align	7
do_hash_page:
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if so, go straight to do_page_fault */
	andis.  r0,r4,DSISR_DABRMATCH@h
	bne-    handle_dabr_fault
	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 * r6 contains dsisr
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	ld      r6,_DSISR(r1)
	bl	hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f			/* negative: HV refused the insert */
1583
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpdi	r3,0
	beq+	12f			/* handled: normal interrupt return */
	/* do_page_fault failed: report it via bad_page_fault */
	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	save_nvgprs
	ld      r4,_DAR(r1)
	ld      r5,_DSISR(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      do_break
12:	b       ret_from_except_lite


/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	save_nvgprs
	mr	r5,r3			/* r5 = hash_page error code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	low_hash_fault
	b	ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	save_nvgprs
	mr	r4,r3			/* r4 = faulting address */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	bad_page_fault
	b	ret_from_except
1632
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 *
 * NOTE(review): r3 is used below as the base of the exception save
 * area (EX_R3 etc.) -- presumably set up by the first-level prolog;
 * confirm against the callers of bad_stack.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	/* r3 and r9-r13 were saved in the exception area; copy them into
	 * the pt_regs frame so the C code sees the interrupted values.
	 */
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	/* Terminate the stack chain and mark the frame with the
	 * exception marker so unwinders recognise it.
	 */
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b
1692