xref: /linux/arch/powerpc/kernel/exceptions-64s.S (revision 988b0c541ed8b1c633c4d4df7169010635942e18)
1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/hw_irq.h>
16#include <asm/exception-64s.h>
17#include <asm/ptrace.h>
18
19/*
20 * We layout physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x17ff : pSeries Interrupt prologs
23 * 0x1800 - 0x4000 : interrupt support, common interrupt prologs
24 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
26 * 0x7000 - 0x7fff : FWNMI data area
27 * 0x8000 - 0x8fff : Initial (CPU0) segment table
28 * 0x9000 -        : Early init and support code
29 */
30	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
31#define SYSCALL_PSERIES_1 					\
32BEGIN_FTR_SECTION						\
33	cmpdi	r0,0x1ebe ; 					\
34	beq-	1f ;						\
35END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
36	mr	r9,r13 ;					\
37	GET_PACA(r13) ;						\
38	mfspr	r11,SPRN_SRR0 ;					\
390:
40
41#define SYSCALL_PSERIES_2_RFID 					\
42	mfspr	r12,SPRN_SRR1 ;					\
43	ld	r10,PACAKBASE(r13) ; 				\
44	LOAD_HANDLER(r10, system_call_entry) ; 			\
45	mtspr	SPRN_SRR0,r10 ; 				\
46	ld	r10,PACAKMSR(r13) ;				\
47	mtspr	SPRN_SRR1,r10 ; 				\
48	rfid ; 							\
49	b	. ;	/* prevent speculative execution */
50
51#define SYSCALL_PSERIES_3					\
52	/* Fast LE/BE switch system call */			\
531:	mfspr	r12,SPRN_SRR1 ;					\
54	xori	r12,r12,MSR_LE ;				\
55	mtspr	SPRN_SRR1,r12 ;					\
56	rfid ;		/* return to userspace */		\
57	b	. ;	/* prevent speculative execution */
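/*
 * (Illustrative note: 0x1ebe, tested in SYSCALL_PSERIES_1 above, is the
 * old "fast endian switch" system call.  On CPUs with CPU_FTR_REAL_LE it
 * bypasses the normal syscall path entirely: label 1 in SYSCALL_PSERIES_3
 * just flips MSR_LE in SRR1 and rfid's straight back to userspace.)
 */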
58
59#if defined(CONFIG_RELOCATABLE)
60	/*
61	 * We can't branch directly; in the direct case we use LR
62	 * and system_call_entry restores LR.  (We thus need to move
63	 * LR to r10 in the RFID case too.)
64	 */
65#define SYSCALL_PSERIES_2_DIRECT				\
66	mflr	r10 ;						\
67	ld	r12,PACAKBASE(r13) ; 				\
68	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
69	mtctr	r12 ;						\
70	mfspr	r12,SPRN_SRR1 ;					\
71	/* Re-use of r13... No spare regs to do this */	\
72	li	r13,MSR_RI ;					\
73	mtmsrd 	r13,1 ;						\
74	GET_PACA(r13) ;	/* get r13 back */			\
75	bctr ;
76#else
77	/* We can branch directly */
78#define SYSCALL_PSERIES_2_DIRECT				\
79	mfspr	r12,SPRN_SRR1 ;					\
80	li	r10,MSR_RI ;					\
81	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
82	b	system_call_entry_direct ;
83#endif
84
85/*
86 * This is the start of the interrupt handlers for pSeries
87 * This code runs with relocation off.
88 * Code from here to __end_interrupts gets copied down to real
89 * address 0x100 when we are running a relocatable kernel.
90 * Therefore any relative branches in this section must only
91 * branch to labels in this section.
92 */
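/*
 * (The copy-down for a relocatable kernel is done early in boot from
 * head_64.S -- see __after_prom_start -- which is why the restriction
 * above on relative branches applies.)
 */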
93	. = 0x100
94	.globl __start_interrupts
95__start_interrupts:
96
97	.globl system_reset_pSeries;
98system_reset_pSeries:
99	HMT_MEDIUM_PPR_DISCARD
100	SET_SCRATCH0(r13)
101#ifdef CONFIG_PPC_P7_NAP
102BEGIN_FTR_SECTION
103	/* Running native on arch 2.06 or later, check if we are
104	 * waking up from nap. We only handle no state loss and
105	 * supervisor state loss. We do -not- handle hypervisor
106	 * state loss at this time.
107	 */
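	/*
	 * Note (illustrative): the rlwinm below extracts the two SRR1
	 * "wakeup state" bits (SRR1[46:47]).  The values handled here
	 * are roughly: 0 = not a powersave wakeup, 1 = no state lost
	 * (power7_wakeup_noloss), 2 = supervisor state lost
	 * (power7_wakeup_loss), 3 = deeper loss, handled as the fast
	 * sleep / TB loss case at 8: below.
	 */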
108	mfspr	r13,SPRN_SRR1
109	rlwinm.	r13,r13,47-31,30,31
110	beq	9f
111
112	/* waking up from powersave (nap) state */
113	cmpwi	cr1,r13,2
114	/* Total loss of HV state is fatal, we could try to use the
115	 * PIR to locate a PACA, then use an emergency stack etc...
116	 * OPAL v3 based powernv platforms have new idle states
117	 * which fall in this category.
118	 */
119	bgt	cr1,8f
120	GET_PACA(r13)
121
122#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
123	li	r0,KVM_HWTHREAD_IN_KERNEL
124	stb	r0,HSTATE_HWTHREAD_STATE(r13)
125	/* Order setting hwthread_state vs. testing hwthread_req */
126	sync
127	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
128	cmpwi	r0,0
129	beq	1f
130	b	kvm_start_guest
1311:
132#endif
133
134	beq	cr1,2f
135	b	power7_wakeup_noloss
1362:	b	power7_wakeup_loss
137
138	/* Fast Sleep wakeup on PowerNV */
1398:	GET_PACA(r13)
140	b 	power7_wakeup_tb_loss
141
1429:
143END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
144#endif /* CONFIG_PPC_P7_NAP */
145	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
146				 NOTEST, 0x100)
147
148	. = 0x200
149machine_check_pSeries_1:
150	/* This is moved out of line as it can be patched by FW, but
151	 * some code path might still want to branch into the original
152	 * vector
153	 */
154	HMT_MEDIUM_PPR_DISCARD
155	SET_SCRATCH0(r13)		/* save r13 */
156#ifdef CONFIG_PPC_P7_NAP
157BEGIN_FTR_SECTION
158	/* Running native on arch 2.06 or later, check if we are
159	 * waking up from nap. We only handle no state loss and
160	 * supervisor state loss. We do -not- handle hypervisor
161	 * state loss at this time.
162	 */
163	mfspr	r13,SPRN_SRR1
164	rlwinm.	r13,r13,47-31,30,31
165	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
166	beq	9f
167
168	mfspr	r13,SPRN_SRR1
169	rlwinm.	r13,r13,47-31,30,31
170	/* waking up from powersave (nap) state */
171	cmpwi	cr1,r13,2
172	/* Total loss of HV state is fatal. Let's just stay stuck here */
173	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
174	bgt	cr1,.
1759:
176	OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
177END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
178#endif /* CONFIG_PPC_P7_NAP */
179	EXCEPTION_PROLOG_0(PACA_EXMC)
180BEGIN_FTR_SECTION
181	b	machine_check_pSeries_early
182FTR_SECTION_ELSE
183	b	machine_check_pSeries_0
184ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
185
186	. = 0x300
187	.globl data_access_pSeries
188data_access_pSeries:
189	HMT_MEDIUM_PPR_DISCARD
190	SET_SCRATCH0(r13)
191BEGIN_FTR_SECTION
192	b	data_access_check_stab
193data_access_not_stab:
194END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
195	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
196				 KVMTEST, 0x300)
197
198	. = 0x380
199	.globl data_access_slb_pSeries
200data_access_slb_pSeries:
201	HMT_MEDIUM_PPR_DISCARD
202	SET_SCRATCH0(r13)
203	EXCEPTION_PROLOG_0(PACA_EXSLB)
204	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
205	std	r3,PACA_EXSLB+EX_R3(r13)
206	mfspr	r3,SPRN_DAR
207#ifdef __DISABLED__
208	/* Keep that around for when we re-implement dynamic VSIDs */
209	cmpdi	r3,0
210	bge	slb_miss_user_pseries
211#endif /* __DISABLED__ */
212	mfspr	r12,SPRN_SRR1
213#ifndef CONFIG_RELOCATABLE
214	b	slb_miss_realmode
215#else
216	/*
217	 * We can't just use a direct branch to slb_miss_realmode
218	 * because the distance from here to there depends on where
219	 * the kernel ends up being put.
220	 */
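	/*
	 * Sketch of the long-branch pattern used below, assuming the
	 * usual macro definitions: PACAKBASE holds the kernel's base
	 * address (_stext) and LOAD_HANDLER() fills in the 16-bit
	 * offset of the target, so an absolute address is built in a
	 * register and reached via mtctr/bctr rather than a relative
	 * branch.
	 */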
221	mfctr	r11
222	ld	r10,PACAKBASE(r13)
223	LOAD_HANDLER(r10, slb_miss_realmode)
224	mtctr	r10
225	bctr
226#endif
227
228	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
229
230	. = 0x480
231	.globl instruction_access_slb_pSeries
232instruction_access_slb_pSeries:
233	HMT_MEDIUM_PPR_DISCARD
234	SET_SCRATCH0(r13)
235	EXCEPTION_PROLOG_0(PACA_EXSLB)
236	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
237	std	r3,PACA_EXSLB+EX_R3(r13)
238	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
239#ifdef __DISABLED__
240	/* Keep that around for when we re-implement dynamic VSIDs */
241	cmpdi	r3,0
242	bge	slb_miss_user_pseries
243#endif /* __DISABLED__ */
244	mfspr	r12,SPRN_SRR1
245#ifndef CONFIG_RELOCATABLE
246	b	slb_miss_realmode
247#else
248	mfctr	r11
249	ld	r10,PACAKBASE(r13)
250	LOAD_HANDLER(r10, slb_miss_realmode)
251	mtctr	r10
252	bctr
253#endif
254
255	/* We open code these as we can't have a ". = x" (even with
256	 * x = ".") within a feature section.
257	 */
258	. = 0x500;
259	.globl hardware_interrupt_pSeries;
260	.globl hardware_interrupt_hv;
261hardware_interrupt_pSeries:
262hardware_interrupt_hv:
263	HMT_MEDIUM_PPR_DISCARD
264	BEGIN_FTR_SECTION
265		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
266					    EXC_HV, SOFTEN_TEST_HV)
267		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
268	FTR_SECTION_ELSE
269		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
270					    EXC_STD, SOFTEN_TEST_HV_201)
271		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
272	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
273
274	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
275	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
276
277	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
278	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
279
280	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
281	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
282
283	. = 0x900
284	.globl decrementer_pSeries
285decrementer_pSeries:
286	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
287
288	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
289
290	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
291	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
292
293	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
294	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
295
296	. = 0xc00
297	.globl	system_call_pSeries
298system_call_pSeries:
299	HMT_MEDIUM
300#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
301	SET_SCRATCH0(r13)
302	GET_PACA(r13)
303	std	r9,PACA_EXGEN+EX_R9(r13)
304	std	r10,PACA_EXGEN+EX_R10(r13)
305	mfcr	r9
306	KVMTEST(0xc00)
307	GET_SCRATCH0(r13)
308#endif
309	SYSCALL_PSERIES_1
310	SYSCALL_PSERIES_2_RFID
311	SYSCALL_PSERIES_3
312	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
313
314	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
315	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
316
317	/* At 0xe??? we have a bunch of hypervisor exceptions; we branch
318	 * out of line to handle them.
319	 */
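	/*
	 * (The 0xe00..0xe80 vectors are only 0x20 bytes apart, so there
	 * is room for little more than "save scratch, set up the save
	 * area, branch"; the real prologs live out of line further down,
	 * in the "moved from 0xe00" block.)
	 */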
320	. = 0xe00
321hv_data_storage_trampoline:
322	SET_SCRATCH0(r13)
323	EXCEPTION_PROLOG_0(PACA_EXGEN)
324	b	h_data_storage_hv
325
326	. = 0xe20
327hv_instr_storage_trampoline:
328	SET_SCRATCH0(r13)
329	EXCEPTION_PROLOG_0(PACA_EXGEN)
330	b	h_instr_storage_hv
331
332	. = 0xe40
333emulation_assist_trampoline:
334	SET_SCRATCH0(r13)
335	EXCEPTION_PROLOG_0(PACA_EXGEN)
336	b	emulation_assist_hv
337
338	. = 0xe60
339hv_exception_trampoline:
340	SET_SCRATCH0(r13)
341	EXCEPTION_PROLOG_0(PACA_EXGEN)
342	b	hmi_exception_hv
343
344	. = 0xe80
345hv_doorbell_trampoline:
346	SET_SCRATCH0(r13)
347	EXCEPTION_PROLOG_0(PACA_EXGEN)
348	b	h_doorbell_hv
349
350	/* We need to deal with the Altivec unavailable exception
351	 * here, which is at 0xf20 and thus lands in the middle of
352	 * the prolog code of the performance monitor one. A little
353	 * trickery is thus necessary.
354	 */
355	. = 0xf00
356performance_monitor_pseries_trampoline:
357	SET_SCRATCH0(r13)
358	EXCEPTION_PROLOG_0(PACA_EXGEN)
359	b	performance_monitor_pSeries
360
361	. = 0xf20
362altivec_unavailable_pseries_trampoline:
363	SET_SCRATCH0(r13)
364	EXCEPTION_PROLOG_0(PACA_EXGEN)
365	b	altivec_unavailable_pSeries
366
367	. = 0xf40
368vsx_unavailable_pseries_trampoline:
369	SET_SCRATCH0(r13)
370	EXCEPTION_PROLOG_0(PACA_EXGEN)
371	b	vsx_unavailable_pSeries
372
373	. = 0xf60
374facility_unavailable_trampoline:
375	SET_SCRATCH0(r13)
376	EXCEPTION_PROLOG_0(PACA_EXGEN)
377	b	facility_unavailable_pSeries
378
379	. = 0xf80
380hv_facility_unavailable_trampoline:
381	SET_SCRATCH0(r13)
382	EXCEPTION_PROLOG_0(PACA_EXGEN)
383	b	facility_unavailable_hv
384
385#ifdef CONFIG_CBE_RAS
386	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
387	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
388#endif /* CONFIG_CBE_RAS */
389
390	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
391	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
392
393	. = 0x1500
394	.global denorm_exception_hv
395denorm_exception_hv:
396	HMT_MEDIUM_PPR_DISCARD
397	mtspr	SPRN_SPRG_HSCRATCH0,r13
398	EXCEPTION_PROLOG_0(PACA_EXGEN)
399	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
400
401#ifdef CONFIG_PPC_DENORMALISATION
402	mfspr	r10,SPRN_HSRR1
403	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
404	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
405	addi	r11,r11,-4		/* HSRR0 is next instruction */
406	bne+	denorm_assist
407#endif
408
409	KVMTEST(0x1500)
410	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
411	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
412
413#ifdef CONFIG_CBE_RAS
414	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
415	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
416#endif /* CONFIG_CBE_RAS */
417
418	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
419	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
420
421#ifdef CONFIG_CBE_RAS
422	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
423	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
424#else
425	. = 0x1800
426#endif /* CONFIG_CBE_RAS */
427
428
429/*** Out of line interrupts support ***/
430
431	.align	7
432	/* moved from 0x200 */
433machine_check_pSeries_early:
434BEGIN_FTR_SECTION
435	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
436	/*
437	 * Register contents:
438	 * R13		= PACA
439	 * R9		= CR
440	 * Original R9 to R13 is saved on PACA_EXMC
441	 *
442	 * Switch to mc_emergency stack and handle re-entrancy (we limit
443	 * the nested MCE up to level 4 to avoid stack overflow).
444	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
445	 *
446	 * We use paca->in_mce to check whether this is the first entry or
447	 * nested machine check. We increment paca->in_mce to track nested
448	 * machine checks.
449	 *
450	 * If this is the first entry then set stack pointer to
451	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
452	 * stack frame on mc_emergency stack.
453	 *
454	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
455	 * checkstop if we get another machine check exception before we do
456	 * rfid with MSR_ME=1.
457	 */
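	/*
	 * Worked example (illustrative): on the first machine check,
	 * paca->in_mce goes 0 -> 1 and r1 is switched to mc_emergency_sp;
	 * a nested machine check sees in_mce > 0 and simply carves
	 * another INT_FRAME_SIZE frame off the same emergency stack,
	 * up to the limit of 4 checked below.
	 */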
458	mr	r11,r1			/* Save r1 */
459	lhz	r10,PACA_IN_MCE(r13)
460	cmpwi	r10,0			/* Are we in nested machine check */
461	bne	0f			/* Yes, we are. */
462	/* First machine check entry */
463	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
4640:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
465	addi	r10,r10,1		/* increment paca->in_mce */
466	sth	r10,PACA_IN_MCE(r13)
467	/* Limit nested MCE to level 4 to avoid stack overflow */
468	cmpwi	r10,4
469	bgt	2f			/* Check if we hit limit of 4 */
470	std	r11,GPR1(r1)		/* Save r1 on the stack. */
471	std	r11,0(r1)		/* make stack chain pointer */
472	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
473	std	r11,_NIP(r1)
474	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
475	std	r11,_MSR(r1)
476	mfspr	r11,SPRN_DAR		/* Save DAR */
477	std	r11,_DAR(r1)
478	mfspr	r11,SPRN_DSISR		/* Save DSISR */
479	std	r11,_DSISR(r1)
480	std	r9,_CCR(r1)		/* Save CR in stackframe */
481	/* Save r9 through r13 from EXMC save area to stack frame. */
482	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
483	mfmsr	r11			/* get MSR value */
484	ori	r11,r11,MSR_ME		/* turn on ME bit */
485	ori	r11,r11,MSR_RI		/* turn on RI bit */
486	ld	r12,PACAKBASE(r13)	/* get high part of &label */
487	LOAD_HANDLER(r12, machine_check_handle_early)
4881:	mtspr	SPRN_SRR0,r12
489	mtspr	SPRN_SRR1,r11
490	rfid
491	b	.	/* prevent speculative execution */
4922:
493	/* Stack overflow. Stay on emergency stack and panic.
494	 * Keep the ME bit off while panicking, so that if we hit
495	 * another machine check we checkstop.
496	 */
497	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
498	ld	r11,PACAKMSR(r13)
499	ld	r12,PACAKBASE(r13)
500	LOAD_HANDLER(r12, unrecover_mce)
501	li	r10,MSR_ME
502	andc	r11,r11,r10		/* Turn off MSR_ME */
503	b	1b
504	b	.	/* prevent speculative execution */
505END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
506
507machine_check_pSeries:
508	.globl machine_check_fwnmi
509machine_check_fwnmi:
510	HMT_MEDIUM_PPR_DISCARD
511	SET_SCRATCH0(r13)		/* save r13 */
512	EXCEPTION_PROLOG_0(PACA_EXMC)
513machine_check_pSeries_0:
514	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
515	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
516	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
517
518	/* moved from 0x300 */
519data_access_check_stab:
520	GET_PACA(r13)
521	std	r9,PACA_EXSLB+EX_R9(r13)
522	std	r10,PACA_EXSLB+EX_R10(r13)
523	mfspr	r10,SPRN_DAR
524	mfspr	r9,SPRN_DSISR
525	srdi	r10,r10,60
526	rlwimi	r10,r9,16,0x20
527#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
528	lbz	r9,HSTATE_IN_GUEST(r13)
529	rlwimi	r10,r9,8,0x300
530#endif
531	mfcr	r9
532	cmpwi	r10,0x2c
533	beq	do_stab_bolted_pSeries
534	mtcrf	0x80,r9
535	ld	r9,PACA_EXSLB+EX_R9(r13)
536	ld	r10,PACA_EXSLB+EX_R10(r13)
537	b	data_access_not_stab
538do_stab_bolted_pSeries:
539	std	r11,PACA_EXSLB+EX_R11(r13)
540	std	r12,PACA_EXSLB+EX_R12(r13)
541	GET_SCRATCH0(r10)
542	std	r10,PACA_EXSLB+EX_R13(r13)
543	EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD)
544
545	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
546	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
547	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
548	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
549	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
550	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
551
552#ifdef CONFIG_PPC_DENORMALISATION
553denorm_assist:
554BEGIN_FTR_SECTION
555/*
556 * To denormalise we need to move a copy of the register to itself.
557 * For POWER6 do that here for all FP regs.
558 */
559	mfmsr	r10
560	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
561	xori	r10,r10,(MSR_FE0|MSR_FE1)
562	mtmsrd	r10
563	sync
564
565#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
566#define FMR4(n)  FMR2(n) ; FMR2(n+2)
567#define FMR8(n)  FMR4(n) ; FMR4(n+4)
568#define FMR16(n) FMR8(n) ; FMR8(n+8)
569#define FMR32(n) FMR16(n) ; FMR16(n+16)
570	FMR32(0)
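/*
 * (For illustration: the doubling macros above expand FMR32(0) into
 * "fmr 0,0 ; fmr 1,1 ; ... ; fmr 31,31", i.e. a self-copy of every
 * FP register; XVCPSGNDP32() in the other feature section plays the
 * same trick for the VSX registers.)
 */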
571
572FTR_SECTION_ELSE
573/*
574 * To denormalise we need to move a copy of the register to itself.
575 * For POWER7 do that here for the first 32 VSX registers only.
576 */
577	mfmsr	r10
578	oris	r10,r10,MSR_VSX@h
579	mtmsrd	r10
580	sync
581
582#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
583#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
584#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
585#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
586#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
587	XVCPSGNDP32(0)
588
589ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
590
591BEGIN_FTR_SECTION
592	b	denorm_done
593END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
594/*
595 * To denormalise we need to move a copy of the register to itself.
596 * For POWER8 we need to do that for all 64 VSX registers
597 */
598	XVCPSGNDP32(32)
599denorm_done:
600	mtspr	SPRN_HSRR0,r11
601	mtcrf	0x80,r9
602	ld	r9,PACA_EXGEN+EX_R9(r13)
603	RESTORE_PPR_PACA(PACA_EXGEN, r10)
604BEGIN_FTR_SECTION
605	ld	r10,PACA_EXGEN+EX_CFAR(r13)
606	mtspr	SPRN_CFAR,r10
607END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
608	ld	r10,PACA_EXGEN+EX_R10(r13)
609	ld	r11,PACA_EXGEN+EX_R11(r13)
610	ld	r12,PACA_EXGEN+EX_R12(r13)
611	ld	r13,PACA_EXGEN+EX_R13(r13)
612	HRFID
613	b	.
614#endif
615
616	.align	7
617	/* moved from 0xe00 */
618	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
619	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
620	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
621	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
622	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
623	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
624	STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
625	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
626	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
627	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
628
629	/* moved from 0xf00 */
630	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
631	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
632	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
633	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
634	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
635	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
636	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
637	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
638	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
639	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
640
641/*
642 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
643 * - If it was a decrementer interrupt, we bump the dec to max and return.
644 * - If it was a doorbell we return immediately since doorbells are edge
645 *   triggered and won't automatically refire.
646 * - else we hard disable and return.
647 * This is called with r10 containing the value to OR to the paca field.
648 */
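/*
 * (Replay note, for orientation: the PACA_IRQ_* bits recorded here are
 * what the soft-enable path later inspects in order to replay the lost
 * interrupt -- see __check_irq_replay() in arch/powerpc/kernel/irq.c
 * and __replay_interrupt() below.  The rldicl/rotldi pair in the macro
 * rotates the saved SRR1 so that MSR_EE sits in the top bit, clears
 * it, and rotates back.)
 */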
649#define MASKED_INTERRUPT(_H)				\
650masked_##_H##interrupt:					\
651	std	r11,PACA_EXGEN+EX_R11(r13);		\
652	lbz	r11,PACAIRQHAPPENED(r13);		\
653	or	r11,r11,r10;				\
654	stb	r11,PACAIRQHAPPENED(r13);		\
655	cmpwi	r10,PACA_IRQ_DEC;			\
656	bne	1f;					\
657	lis	r10,0x7fff;				\
658	ori	r10,r10,0xffff;				\
659	mtspr	SPRN_DEC,r10;				\
660	b	2f;					\
6611:	cmpwi	r10,PACA_IRQ_DBELL;			\
662	beq	2f;					\
663	mfspr	r10,SPRN_##_H##SRR1;			\
664	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
665	rotldi	r10,r10,16;				\
666	mtspr	SPRN_##_H##SRR1,r10;			\
6672:	mtcrf	0x80,r9;				\
668	ld	r9,PACA_EXGEN+EX_R9(r13);		\
669	ld	r10,PACA_EXGEN+EX_R10(r13);		\
670	ld	r11,PACA_EXGEN+EX_R11(r13);		\
671	GET_SCRATCH0(r13);				\
672	##_H##rfid;					\
673	b	.
674
675	MASKED_INTERRUPT()
676	MASKED_INTERRUPT(H)
677
678/*
679 * Called from arch_local_irq_enable when an interrupt needs
680 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
681 * which kind of interrupt. MSR:EE is already off. We generate a
682 * stack frame as if a real interrupt had happened.
683 *
684 * Note: While MSR:EE is off, we need to make sure that _MSR
685 * in the generated frame has EE set to 1 or the exception
686 * handler will not properly re-enable interrupts.
687 */
688_GLOBAL(__replay_interrupt)
689	/* We are going to jump to the exception common code which
690	 * will retrieve various register values from the PACA which
691	 * we don't give a damn about, so we don't bother storing them.
692	 */
693	mfmsr	r12
694	mflr	r11
695	mfcr	r9
696	ori	r12,r12,MSR_EE
697	cmpwi	r3,0x900
698	beq	decrementer_common
699	cmpwi	r3,0x500
700	beq	hardware_interrupt_common
701BEGIN_FTR_SECTION
702	cmpwi	r3,0xe80
703	beq	h_doorbell_common
704FTR_SECTION_ELSE
705	cmpwi	r3,0xa00
706	beq	doorbell_super_common
707ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
708	blr
709
710#ifdef CONFIG_PPC_PSERIES
711/*
712 * Vectors for the FWNMI option.  Share common code.
713 */
714	.globl system_reset_fwnmi
715      .align 7
716system_reset_fwnmi:
717	HMT_MEDIUM_PPR_DISCARD
718	SET_SCRATCH0(r13)		/* save r13 */
719	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
720				 NOTEST, 0x100)
721
722#endif /* CONFIG_PPC_PSERIES */
723
724#ifdef __DISABLED__
725/*
726 * This is used for when the SLB miss handler has to go virtual,
727 * which doesn't currently happen, but will once we re-implement
728 * dynamic VSIDs for shared page tables.
729 */
730slb_miss_user_pseries:
731	std	r10,PACA_EXGEN+EX_R10(r13)
732	std	r11,PACA_EXGEN+EX_R11(r13)
733	std	r12,PACA_EXGEN+EX_R12(r13)
734	GET_SCRATCH0(r10)
735	ld	r11,PACA_EXSLB+EX_R9(r13)
736	ld	r12,PACA_EXSLB+EX_R3(r13)
737	std	r10,PACA_EXGEN+EX_R13(r13)
738	std	r11,PACA_EXGEN+EX_R9(r13)
739	std	r12,PACA_EXGEN+EX_R3(r13)
740	clrrdi	r12,r13,32
741	mfmsr	r10
742	mfspr	r11,SRR0			/* save SRR0 */
743	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
744	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
745	mtspr	SRR0,r12
746	mfspr	r12,SRR1			/* and SRR1 */
747	mtspr	SRR1,r10
748	rfid
749	b	.				/* prevent spec. execution */
750#endif /* __DISABLED__ */
751
752#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
753kvmppc_skip_interrupt:
754	/*
755	 * Here all GPRs are unchanged from when the interrupt happened
756	 * except for r13, which is saved in SPRG_SCRATCH0.
757	 */
758	mfspr	r13, SPRN_SRR0
759	addi	r13, r13, 4
760	mtspr	SPRN_SRR0, r13
761	GET_SCRATCH0(r13)
762	rfid
763	b	.
764
765kvmppc_skip_Hinterrupt:
766	/*
767	 * Here all GPRs are unchanged from when the interrupt happened
768	 * except for r13, which is saved in SPRG_SCRATCH0.
769	 */
770	mfspr	r13, SPRN_HSRR0
771	addi	r13, r13, 4
772	mtspr	SPRN_HSRR0, r13
773	GET_SCRATCH0(r13)
774	hrfid
775	b	.
776#endif
777
778/*
779 * Code from here down to __end_handlers is invoked from the
780 * exception prologs above.  Because the prologs assemble the
781 * addresses of these handlers using the LOAD_HANDLER macro,
782 * which uses an ori instruction, these handlers must be in
783 * the first 64k of the kernel image.
784 */
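/*
 * (Rough sketch of why, assuming the usual definitions: PACAKBASE
 * holds the kernel base address and LOAD_HANDLER(reg, label) is, at
 * the time of writing, approximately "ori reg,reg,(label)-_stext",
 * i.e. only a 16-bit offset can be filled in -- hence the 64k limit
 * mentioned above.)
 */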
785
786/*** Common interrupt handlers ***/
787
788	STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)
789
790	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
791	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
792	STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
793#ifdef CONFIG_PPC_DOORBELL
794	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
795#else
796	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
797#endif
798	STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
799	STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
800	STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
801	STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
802	STD_EXCEPTION_COMMON(0xe60, hmi_exception, unknown_exception)
803#ifdef CONFIG_PPC_DOORBELL
804	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
805#else
806	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
807#endif
808	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
809	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
810	STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
811#ifdef CONFIG_ALTIVEC
812	STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
813#else
814	STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
815#endif
816#ifdef CONFIG_CBE_RAS
817	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
818	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
819	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
820#endif /* CONFIG_CBE_RAS */
821
822	/*
823	 * Relocation-on interrupts: A subset of the interrupts can be delivered
824	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
825	 * it.  Addresses are the same as the original interrupt addresses, but
826	 * offset by 0xc000000000004000.
827	 * It's impossible to receive interrupts below 0x300 via this mechanism.
828	 * KVM: None of these traps are from the guest; anything that escalated
829	 * to HV=1 from HV=0 is delivered via real mode handlers.
830	 */
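	/*
	 * (AIL here is the LPCR "Alternate Interrupt Location" field;
	 * when it selects relocation-on delivery the hardware vectors
	 * to 0xc000000000004000 plus the usual offset, which is why the
	 * handlers below sit at 0x4xxx.)
	 */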
831
832	/*
833	 * This uses the standard macro, since the original 0x300 vector
834	 * only has extra guff for STAB-based processors -- which never
835	 * come here.
836	 */
837	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
838	. = 0x4380
839	.globl data_access_slb_relon_pSeries
840data_access_slb_relon_pSeries:
841	SET_SCRATCH0(r13)
842	EXCEPTION_PROLOG_0(PACA_EXSLB)
843	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
844	std	r3,PACA_EXSLB+EX_R3(r13)
845	mfspr	r3,SPRN_DAR
846	mfspr	r12,SPRN_SRR1
847#ifndef CONFIG_RELOCATABLE
848	b	slb_miss_realmode
849#else
850	/*
851	 * We can't just use a direct branch to slb_miss_realmode
852	 * because the distance from here to there depends on where
853	 * the kernel ends up being put.
854	 */
855	mfctr	r11
856	ld	r10,PACAKBASE(r13)
857	LOAD_HANDLER(r10, slb_miss_realmode)
858	mtctr	r10
859	bctr
860#endif
861
862	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
863	. = 0x4480
864	.globl instruction_access_slb_relon_pSeries
865instruction_access_slb_relon_pSeries:
866	SET_SCRATCH0(r13)
867	EXCEPTION_PROLOG_0(PACA_EXSLB)
868	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
869	std	r3,PACA_EXSLB+EX_R3(r13)
870	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
871	mfspr	r12,SPRN_SRR1
872#ifndef CONFIG_RELOCATABLE
873	b	slb_miss_realmode
874#else
875	mfctr	r11
876	ld	r10,PACAKBASE(r13)
877	LOAD_HANDLER(r10, slb_miss_realmode)
878	mtctr	r10
879	bctr
880#endif
881
882	. = 0x4500
883	.globl hardware_interrupt_relon_pSeries;
884	.globl hardware_interrupt_relon_hv;
885hardware_interrupt_relon_pSeries:
886hardware_interrupt_relon_hv:
887	BEGIN_FTR_SECTION
888		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
889	FTR_SECTION_ELSE
890		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
891	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
892	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
893	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
894	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
895	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
896	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
897	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
898	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
899
900	. = 0x4c00
901	.globl system_call_relon_pSeries
902system_call_relon_pSeries:
903	HMT_MEDIUM
904	SYSCALL_PSERIES_1
905	SYSCALL_PSERIES_2_DIRECT
906	SYSCALL_PSERIES_3
907
908	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
909
910	. = 0x4e00
911	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
912
913	. = 0x4e20
914	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
915
916	. = 0x4e40
917emulation_assist_relon_trampoline:
918	SET_SCRATCH0(r13)
919	EXCEPTION_PROLOG_0(PACA_EXGEN)
920	b	emulation_assist_relon_hv
921
922	. = 0x4e60
923	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
924
925	. = 0x4e80
926h_doorbell_relon_trampoline:
927	SET_SCRATCH0(r13)
928	EXCEPTION_PROLOG_0(PACA_EXGEN)
929	b	h_doorbell_relon_hv
930
931	. = 0x4f00
932performance_monitor_relon_pseries_trampoline:
933	SET_SCRATCH0(r13)
934	EXCEPTION_PROLOG_0(PACA_EXGEN)
935	b	performance_monitor_relon_pSeries
936
937	. = 0x4f20
938altivec_unavailable_relon_pseries_trampoline:
939	SET_SCRATCH0(r13)
940	EXCEPTION_PROLOG_0(PACA_EXGEN)
941	b	altivec_unavailable_relon_pSeries
942
943	. = 0x4f40
944vsx_unavailable_relon_pseries_trampoline:
945	SET_SCRATCH0(r13)
946	EXCEPTION_PROLOG_0(PACA_EXGEN)
947	b	vsx_unavailable_relon_pSeries
948
949	. = 0x4f60
950facility_unavailable_relon_trampoline:
951	SET_SCRATCH0(r13)
952	EXCEPTION_PROLOG_0(PACA_EXGEN)
953	b	facility_unavailable_relon_pSeries
954
955	. = 0x4f80
956hv_facility_unavailable_relon_trampoline:
957	SET_SCRATCH0(r13)
958	EXCEPTION_PROLOG_0(PACA_EXGEN)
959	b	hv_facility_unavailable_relon_hv
960
961	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
962#ifdef CONFIG_PPC_DENORMALISATION
963	. = 0x5500
964	b	denorm_exception_hv
965#endif
966	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
967
968	/* Other future vectors */
969	.align	7
970	.globl	__end_interrupts
971__end_interrupts:
972
973	.align	7
974system_call_entry_direct:
975#if defined(CONFIG_RELOCATABLE)
976	/* The first level prologue may have used LR to get here, saving
977	 * the original LR in r10.  To save hacking/ifdeffing common code, restore here.
978	 */
979	mtlr	r10
980#endif
981system_call_entry:
982	b	system_call_common
983
984ppc64_runlatch_on_trampoline:
985	b	__ppc64_runlatch_on
986
987/*
988 * Here we have detected that the kernel stack pointer is bad.
989 * R9 contains the saved CR, r13 points to the paca,
990 * r10 contains the (bad) kernel stack pointer,
991 * r11 and r12 contain the saved SRR0 and SRR1.
992 * We switch to using an emergency stack, save the registers there,
993 * and call kernel_bad_stack(), which panics.
994 */
995bad_stack:
996	ld	r1,PACAEMERGSP(r13)
997	subi	r1,r1,64+INT_FRAME_SIZE
998	std	r9,_CCR(r1)
999	std	r10,GPR1(r1)
1000	std	r11,_NIP(r1)
1001	std	r12,_MSR(r1)
1002	mfspr	r11,SPRN_DAR
1003	mfspr	r12,SPRN_DSISR
1004	std	r11,_DAR(r1)
1005	std	r12,_DSISR(r1)
1006	mflr	r10
1007	mfctr	r11
1008	mfxer	r12
1009	std	r10,_LINK(r1)
1010	std	r11,_CTR(r1)
1011	std	r12,_XER(r1)
1012	SAVE_GPR(0,r1)
1013	SAVE_GPR(2,r1)
1014	ld	r10,EX_R3(r3)
1015	std	r10,GPR3(r1)
1016	SAVE_GPR(4,r1)
1017	SAVE_4GPRS(5,r1)
1018	ld	r9,EX_R9(r3)
1019	ld	r10,EX_R10(r3)
1020	SAVE_2GPRS(9,r1)
1021	ld	r9,EX_R11(r3)
1022	ld	r10,EX_R12(r3)
1023	ld	r11,EX_R13(r3)
1024	std	r9,GPR11(r1)
1025	std	r10,GPR12(r1)
1026	std	r11,GPR13(r1)
1027BEGIN_FTR_SECTION
1028	ld	r10,EX_CFAR(r3)
1029	std	r10,ORIG_GPR3(r1)
1030END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1031	SAVE_8GPRS(14,r1)
1032	SAVE_10GPRS(22,r1)
1033	lhz	r12,PACA_TRAP_SAVE(r13)
1034	std	r12,_TRAP(r1)
1035	addi	r11,r1,INT_FRAME_SIZE
1036	std	r11,0(r1)
1037	li	r12,0
1038	std	r12,0(r11)
1039	ld	r2,PACATOC(r13)
1040	ld	r11,exception_marker@toc(r2)
1041	std	r12,RESULT(r1)
1042	std	r11,STACK_FRAME_OVERHEAD-16(r1)
10431:	addi	r3,r1,STACK_FRAME_OVERHEAD
1044	bl	kernel_bad_stack
1045	b	1b
1046
1047/*
1048 * Here r13 points to the paca, r9 contains the saved CR,
1049 * SRR0 and SRR1 are saved in r11 and r12,
1050 * r9 - r13 are saved in paca->exgen.
1051 */
1052	.align	7
1053	.globl data_access_common
1054data_access_common:
1055	mfspr	r10,SPRN_DAR
1056	std	r10,PACA_EXGEN+EX_DAR(r13)
1057	mfspr	r10,SPRN_DSISR
1058	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1059	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
1060	DISABLE_INTS
1061	ld	r12,_MSR(r1)
1062	ld	r3,PACA_EXGEN+EX_DAR(r13)
1063	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1064	li	r5,0x300
1065	b	do_hash_page		/* Try to handle as hpte fault */
1066
1067	.align  7
1068	.globl  h_data_storage_common
1069h_data_storage_common:
1070	mfspr   r10,SPRN_HDAR
1071	std     r10,PACA_EXGEN+EX_DAR(r13)
1072	mfspr   r10,SPRN_HDSISR
1073	stw     r10,PACA_EXGEN+EX_DSISR(r13)
1074	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
1075	bl      save_nvgprs
1076	DISABLE_INTS
1077	addi    r3,r1,STACK_FRAME_OVERHEAD
1078	bl      unknown_exception
1079	b       ret_from_except
1080
1081	.align	7
1082	.globl instruction_access_common
1083instruction_access_common:
1084	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
1085	DISABLE_INTS
1086	ld	r12,_MSR(r1)
1087	ld	r3,_NIP(r1)
1088	andis.	r4,r12,0x5820
1089	li	r5,0x400
1090	b	do_hash_page		/* Try to handle as hpte fault */
1091
1092	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
1093
1094/*
1095 * Here is the common SLB miss user handler, used when going to virtual
1096 * mode for SLB misses; it is currently not used.
1097 */
1098#ifdef __DISABLED__
1099	.align	7
1100	.globl	slb_miss_user_common
1101slb_miss_user_common:
1102	mflr	r10
1103	std	r3,PACA_EXGEN+EX_DAR(r13)
1104	stw	r9,PACA_EXGEN+EX_CCR(r13)
1105	std	r10,PACA_EXGEN+EX_LR(r13)
1106	std	r11,PACA_EXGEN+EX_SRR0(r13)
1107	bl	slb_allocate_user
1108
1109	ld	r10,PACA_EXGEN+EX_LR(r13)
1110	ld	r3,PACA_EXGEN+EX_R3(r13)
1111	lwz	r9,PACA_EXGEN+EX_CCR(r13)
1112	ld	r11,PACA_EXGEN+EX_SRR0(r13)
1113	mtlr	r10
1114	beq-	slb_miss_fault
1115
1116	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
1117	beq-	unrecov_user_slb
1118	mfmsr	r10
1119
1120.machine push
1121.machine "power4"
1122	mtcrf	0x80,r9
1123.machine pop
1124
1125	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
1126	mtmsrd	r10,1
1127
1128	mtspr	SRR0,r11
1129	mtspr	SRR1,r12
1130
1131	ld	r9,PACA_EXGEN+EX_R9(r13)
1132	ld	r10,PACA_EXGEN+EX_R10(r13)
1133	ld	r11,PACA_EXGEN+EX_R11(r13)
1134	ld	r12,PACA_EXGEN+EX_R12(r13)
1135	ld	r13,PACA_EXGEN+EX_R13(r13)
1136	rfid
1137	b	.
1138
1139slb_miss_fault:
1140	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1141	ld	r4,PACA_EXGEN+EX_DAR(r13)
1142	li	r5,0
1143	std	r4,_DAR(r1)
1144	std	r5,_DSISR(r1)
1145	b	handle_page_fault
1146
1147unrecov_user_slb:
1148	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1149	DISABLE_INTS
1150	bl	save_nvgprs
11511:	addi	r3,r1,STACK_FRAME_OVERHEAD
1152	bl	unrecoverable_exception
1153	b	1b
1154
1155#endif /* __DISABLED__ */
1156
1157
1158	/*
1159	 * Machine check is different because we use a different
1160	 * save area: PACA_EXMC instead of PACA_EXGEN.
1161	 */
1162	.align	7
1163	.globl machine_check_common
1164machine_check_common:
1165
1166	mfspr	r10,SPRN_DAR
1167	std	r10,PACA_EXGEN+EX_DAR(r13)
1168	mfspr	r10,SPRN_DSISR
1169	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1170	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
1171	FINISH_NAP
1172	DISABLE_INTS
1173	ld	r3,PACA_EXGEN+EX_DAR(r13)
1174	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1175	std	r3,_DAR(r1)
1176	std	r4,_DSISR(r1)
1177	bl	save_nvgprs
1178	addi	r3,r1,STACK_FRAME_OVERHEAD
1179	bl	machine_check_exception
1180	b	ret_from_except
1181
1182	.align	7
1183	.globl alignment_common
1184alignment_common:
1185	mfspr	r10,SPRN_DAR
1186	std	r10,PACA_EXGEN+EX_DAR(r13)
1187	mfspr	r10,SPRN_DSISR
1188	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1189	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1190	ld	r3,PACA_EXGEN+EX_DAR(r13)
1191	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1192	std	r3,_DAR(r1)
1193	std	r4,_DSISR(r1)
1194	bl	save_nvgprs
1195	DISABLE_INTS
1196	addi	r3,r1,STACK_FRAME_OVERHEAD
1197	bl	alignment_exception
1198	b	ret_from_except
1199
1200	.align	7
1201	.globl program_check_common
1202program_check_common:
1203	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1204	bl	save_nvgprs
1205	DISABLE_INTS
1206	addi	r3,r1,STACK_FRAME_OVERHEAD
1207	bl	program_check_exception
1208	b	ret_from_except
1209
1210	.align	7
1211	.globl fp_unavailable_common
1212fp_unavailable_common:
1213	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1214	bne	1f			/* if from user, just load it up */
1215	bl	save_nvgprs
1216	DISABLE_INTS
1217	addi	r3,r1,STACK_FRAME_OVERHEAD
1218	bl	kernel_fp_unavailable_exception
1219	BUG_OPCODE
12201:
1221#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1222BEGIN_FTR_SECTION
1223	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1224	 * transaction), go do TM stuff
1225	 */
1226	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1227	bne-	2f
1228END_FTR_SECTION_IFSET(CPU_FTR_TM)
1229#endif
1230	bl	load_up_fpu
1231	b	fast_exception_return
1232#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12332:	/* User process was in a transaction */
1234	bl	save_nvgprs
1235	DISABLE_INTS
1236	addi	r3,r1,STACK_FRAME_OVERHEAD
1237	bl	fp_unavailable_tm
1238	b	ret_from_except
1239#endif
1240	.align	7
1241	.globl altivec_unavailable_common
1242altivec_unavailable_common:
1243	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1244#ifdef CONFIG_ALTIVEC
1245BEGIN_FTR_SECTION
1246	beq	1f
1247#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1248  BEGIN_FTR_SECTION_NESTED(69)
1249	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1250	 * transaction), go do TM stuff
1251	 */
1252	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1253	bne-	2f
1254  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1255#endif
1256	bl	load_up_altivec
1257	b	fast_exception_return
1258#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12592:	/* User process was in a transaction */
1260	bl	save_nvgprs
1261	DISABLE_INTS
1262	addi	r3,r1,STACK_FRAME_OVERHEAD
1263	bl	altivec_unavailable_tm
1264	b	ret_from_except
1265#endif
12661:
1267END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1268#endif
1269	bl	save_nvgprs
1270	DISABLE_INTS
1271	addi	r3,r1,STACK_FRAME_OVERHEAD
1272	bl	altivec_unavailable_exception
1273	b	ret_from_except
1274
1275	.align	7
1276	.globl vsx_unavailable_common
1277vsx_unavailable_common:
1278	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1279#ifdef CONFIG_VSX
1280BEGIN_FTR_SECTION
1281	beq	1f
1282#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1283  BEGIN_FTR_SECTION_NESTED(69)
1284	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1285	 * transaction), go do TM stuff
1286	 */
1287	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1288	bne-	2f
1289  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1290#endif
1291	b	load_up_vsx
1292#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12932:	/* User process was in a transaction */
1294	bl	save_nvgprs
1295	DISABLE_INTS
1296	addi	r3,r1,STACK_FRAME_OVERHEAD
1297	bl	vsx_unavailable_tm
1298	b	ret_from_except
1299#endif
13001:
1301END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1302#endif
1303	bl	save_nvgprs
1304	DISABLE_INTS
1305	addi	r3,r1,STACK_FRAME_OVERHEAD
1306	bl	vsx_unavailable_exception
1307	b	ret_from_except
1308
1309	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
1310	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
1311
1312	.align	7
1313	.globl	__end_handlers
1314__end_handlers:
1315
1316	/* Equivalents to the above handlers for relocation-on interrupt vectors */
1317	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
1318	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
1319
1320	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
1321	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1322	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1323	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
1324	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
1325
1326#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1327/*
1328 * Data area reserved for FWNMI option.
1329 * This address (0x7000) is fixed by the RPA.
1330 */
1331	. = 0x7000
1332	.globl fwnmi_data_area
1333fwnmi_data_area:
1334
1335	/* pseries and powernv need to keep the whole page from
1336	 * 0x7000 to 0x8000 free for use by the firmware
1337	 */
1338	. = 0x8000
1339#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1340
1341/* Space for CPU0's segment table */
1342	.balign 4096
1343	.globl initial_stab
1344initial_stab:
1345	.space	4096
1346
1347#ifdef CONFIG_PPC_POWERNV
1348_GLOBAL(opal_mc_secondary_handler)
1349	HMT_MEDIUM_PPR_DISCARD
1350	SET_SCRATCH0(r13)
1351	GET_PACA(r13)
1352	clrldi	r3,r3,2
1353	tovirt(r3,r3)
1354	std	r3,PACA_OPAL_MC_EVT(r13)
1355	ld	r13,OPAL_MC_SRR0(r3)
1356	mtspr	SPRN_SRR0,r13
1357	ld	r13,OPAL_MC_SRR1(r3)
1358	mtspr	SPRN_SRR1,r13
1359	ld	r3,OPAL_MC_GPR3(r3)
1360	GET_SCRATCH0(r13)
1361	b	machine_check_pSeries
1362#endif /* CONFIG_PPC_POWERNV */
1363
1364
1365#define MACHINE_CHECK_HANDLER_WINDUP			\
1366	/* Clear MSR_RI before setting SRR0 and SRR1. */\
1367	li	r0,MSR_RI;				\
1368	mfmsr	r9;		/* get MSR value */	\
1369	andc	r9,r9,r0;				\
1370	mtmsrd	r9,1;		/* Clear MSR_RI */	\
1371	/* Move original SRR0 and SRR1 into the respective regs */	\
1372	ld	r9,_MSR(r1);				\
1373	mtspr	SPRN_SRR1,r9;				\
1374	ld	r3,_NIP(r1);				\
1375	mtspr	SPRN_SRR0,r3;				\
1376	ld	r9,_CTR(r1);				\
1377	mtctr	r9;					\
1378	ld	r9,_XER(r1);				\
1379	mtxer	r9;					\
1380	ld	r9,_LINK(r1);				\
1381	mtlr	r9;					\
1382	REST_GPR(0, r1);				\
1383	REST_8GPRS(2, r1);				\
1384	REST_GPR(10, r1);				\
1385	ld	r11,_CCR(r1);				\
1386	mtcr	r11;					\
1387	/* Decrement paca->in_mce. */			\
1388	lhz	r12,PACA_IN_MCE(r13);			\
1389	subi	r12,r12,1;				\
1390	sth	r12,PACA_IN_MCE(r13);			\
1391	REST_GPR(11, r1);				\
1392	REST_2GPRS(12, r1);				\
1393	/* restore original r1. */			\
1394	ld	r1,GPR1(r1)
1395
1396	/*
1397	 * Handle machine check early in real mode. We come here with
1398	 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
1399	 */
1400	.align	7
1401	.globl machine_check_handle_early
1402machine_check_handle_early:
1403	std	r0,GPR0(r1)	/* Save r0 */
1404	EXCEPTION_PROLOG_COMMON_3(0x200)
1405	bl	save_nvgprs
1406	addi	r3,r1,STACK_FRAME_OVERHEAD
1407	bl	machine_check_early
1408	std	r3,RESULT(r1)	/* Save result */
1409	ld	r12,_MSR(r1)
1410#ifdef	CONFIG_PPC_P7_NAP
1411	/*
1412	 * Check if the thread was in power saving mode. We come here when
1413	 * either of the following is true:
1414	 * a. the thread wasn't in power saving mode
1415	 * b. the thread was in power saving mode with no state loss or
1416	 *    supervisor state loss
1417	 *
1418	 * Go back to nap again if (b) is true.
1419	 */
1420	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */
1421	beq	4f			/* No, it wasn't */
1422	/* Thread was in power saving mode. Go back to nap again. */
1423	cmpwi	r11,2
1424	bne	3f
1425	/* Supervisor state loss */
1426	li	r0,1
1427	stb	r0,PACA_NAPSTATELOST(r13)
14283:	bl	machine_check_queue_event
1429	MACHINE_CHECK_HANDLER_WINDUP
1430	GET_PACA(r13)
1431	ld	r1,PACAR1(r13)
1432	b	power7_enter_nap_mode
14334:
1434#endif
1435	/*
1436	 * Check if we are coming from hypervisor userspace. If yes then we
1437	 * continue in host kernel in V mode to deliver the MC event.
1438	 */
1439	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
1440	beq	5f
1441	andi.	r11,r12,MSR_PR		/* See if coming from user. */
1442	bne	9f			/* continue in V mode if we are. */
1443
14445:
1445#ifdef CONFIG_KVM_BOOK3S_64_HV
1446	/*
1447	 * We are coming from kernel context. Check if we are coming from
1448	 * guest. If yes, then we can continue. We will fall through
1449	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
1450	 */
1451	lbz	r11,HSTATE_IN_GUEST(r13)
1452	cmpwi	r11,0			/* Check if coming from guest */
1453	bne	9f			/* continue if we are. */
1454#endif
1455	/*
1456	 * At this point we are not sure about what context we come from.
1457	 * Queue up the MCE event and return from the interrupt.
1458	 * But before that, check if this is an unrecoverable exception.
1459	 * If yes, then stay on emergency stack and panic.
1460	 */
1461	andi.	r11,r12,MSR_RI
1462	bne	2f
14631:	mfspr	r11,SPRN_SRR0
1464	ld	r10,PACAKBASE(r13)
1465	LOAD_HANDLER(r10,unrecover_mce)
1466	mtspr	SPRN_SRR0,r10
1467	ld	r10,PACAKMSR(r13)
1468	/*
1469	 * We are going down, but there is a chance that we might get hit by
1470	 * another MCE during the panic path and end up in an unstable state
1471	 * with no way out. Hence, turn the ME bit off while going down, so
1472	 * that if another MCE is hit during the panic path, the system will
1473	 * checkstop and the hypervisor will get restarted cleanly by the SP.
1474	 */
1475	li	r3,MSR_ME
1476	andc	r10,r10,r3		/* Turn off MSR_ME */
1477	mtspr	SPRN_SRR1,r10
1478	rfid
1479	b	.
14802:
1481	/*
1482	 * Check if we have successfully handled/recovered from the error;
1483	 * if not, stay on the emergency stack and panic.
1484	 */
1485	ld	r3,RESULT(r1)	/* Load result */
1486	cmpdi	r3,0		/* see if we handled MCE successfully */
1487
1488	beq	1b		/* if !handled then panic */
1489	/*
1490	 * Return from MC interrupt.
1491	 * Queue up the MCE event so that we can log it later, while
1492	 * returning from kernel or opal call.
1493	 */
1494	bl	machine_check_queue_event
1495	MACHINE_CHECK_HANDLER_WINDUP
1496	rfid
14979:
1498	/* Deliver the machine check to host kernel in V mode. */
1499	MACHINE_CHECK_HANDLER_WINDUP
1500	b	machine_check_pSeries
1501
1502unrecover_mce:
1503	/* Invoke machine_check_exception to print MCE event and panic. */
1504	addi	r3,r1,STACK_FRAME_OVERHEAD
1505	bl	machine_check_exception
1506	/*
1507	 * We will not reach here. Even if we did, there is no way out. Call
1508	 * unrecoverable_exception and die.
1509	 */
15101:	addi	r3,r1,STACK_FRAME_OVERHEAD
1511	bl	unrecoverable_exception
1512	b	1b
1513/*
1514 * r13 points to the PACA, r9 contains the saved CR,
1515 * r12 contains the saved SRR1, SRR0 is still ready for return
1516 * r3 has the faulting address
1517 * r9 - r13 are saved in paca->exslb.
1518 * r3 is saved in paca->slb_r3
1519 * We assume we aren't going to take any exceptions during this procedure.
1520 */
1521slb_miss_realmode:
1522	mflr	r10
1523#ifdef CONFIG_RELOCATABLE
1524	mtctr	r11
1525#endif
1526
1527	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1528	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
1529
1530	bl	slb_allocate_realmode
1531
1532	/* All done -- return from exception. */
1533
1534	ld	r10,PACA_EXSLB+EX_LR(r13)
1535	ld	r3,PACA_EXSLB+EX_R3(r13)
1536	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1537
1538	mtlr	r10
1539
1540	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
1541	beq-	2f
1542
1543.machine	push
1544.machine	"power4"
1545	mtcrf	0x80,r9
1546	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1547.machine	pop
1548
1549	RESTORE_PPR_PACA(PACA_EXSLB, r9)
1550	ld	r9,PACA_EXSLB+EX_R9(r13)
1551	ld	r10,PACA_EXSLB+EX_R10(r13)
1552	ld	r11,PACA_EXSLB+EX_R11(r13)
1553	ld	r12,PACA_EXSLB+EX_R12(r13)
1554	ld	r13,PACA_EXSLB+EX_R13(r13)
1555	rfid
1556	b	.	/* prevent speculative execution */
1557
15582:	mfspr	r11,SPRN_SRR0
1559	ld	r10,PACAKBASE(r13)
1560	LOAD_HANDLER(r10,unrecov_slb)
1561	mtspr	SPRN_SRR0,r10
1562	ld	r10,PACAKMSR(r13)
1563	mtspr	SPRN_SRR1,r10
1564	rfid
1565	b	.
1566
1567unrecov_slb:
1568	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1569	DISABLE_INTS
1570	bl	save_nvgprs
15711:	addi	r3,r1,STACK_FRAME_OVERHEAD
1572	bl	unrecoverable_exception
1573	b	1b
1574
1575
1576#ifdef CONFIG_PPC_970_NAP
1577power4_fixup_nap:
1578	andc	r9,r9,r10
1579	std	r9,TI_LOCAL_FLAGS(r11)
1580	ld	r10,_LINK(r1)		/* make idle task do the */
1581	std	r10,_NIP(r1)		/* equivalent of a blr */
1582	blr
1583#endif
1584
1585/*
1586 * Hash table stuff
1587 */
1588	.align	7
1589do_hash_page:
1590	std	r3,_DAR(r1)
1591	std	r4,_DSISR(r1)
1592
1593	andis.	r0,r4,0xa410		/* weird error? */
1594	bne-	handle_page_fault	/* if not, try to insert a HPTE */
1595	andis.  r0,r4,DSISR_DABRMATCH@h
1596	bne-    handle_dabr_fault
1597
1598BEGIN_FTR_SECTION
1599	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
1600	bne-	do_ste_alloc		/* If so handle it */
1601END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1602
1603	CURRENT_THREAD_INFO(r11, r1)
1604	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
1605	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
1606	bne	77f			/* then don't call hash_page now */
1607	/*
1608	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1609	 * accessing a userspace segment (even from the kernel). We assume
1610	 * kernel addresses always have the high bit set.
1611	 */
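	/*
	 * Illustrative example of the bit gymnastics below: for a load
	 * fault from userspace (MSR_PR set, DSISR store bit clear,
	 * trap 0x300), the access mask built in r4 ends up as just
	 * _PAGE_PRESENT | _PAGE_USER -- no _PAGE_RW (not a store) and
	 * no _PAGE_EXEC (the trap is not 0x400).
	 */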
1612	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
1613	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
1614	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
1615	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
1616	ori	r4,r4,1			/* add _PAGE_PRESENT */
1617	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
1618
1619	/*
1620	 * r3 contains the faulting address
1621	 * r4 contains the required access permissions
1622	 * r5 contains the trap number
1623	 *
1624	 * at return r3 = 0 for success, 1 for page fault, negative for error
1625	 */
1626	bl	hash_page		/* build HPTE if possible */
1627	cmpdi	r3,0			/* see if hash_page succeeded */
1628
1629	/* Success */
1630	beq	fast_exc_return_irq	/* Return from exception on success */
1631
1632	/* Error */
1633	blt-	13f
1634
1635/* Here we have a page fault that hash_page can't handle. */
1636handle_page_fault:
163711:	ld	r4,_DAR(r1)
1638	ld	r5,_DSISR(r1)
1639	addi	r3,r1,STACK_FRAME_OVERHEAD
1640	bl	do_page_fault
1641	cmpdi	r3,0
1642	beq+	12f
1643	bl	save_nvgprs
1644	mr	r5,r3
1645	addi	r3,r1,STACK_FRAME_OVERHEAD
1646	lwz	r4,_DAR(r1)
1647	bl	bad_page_fault
1648	b	ret_from_except
1649
1650/* We have a data breakpoint exception - handle it */
1651handle_dabr_fault:
1652	bl	save_nvgprs
1653	ld      r4,_DAR(r1)
1654	ld      r5,_DSISR(r1)
1655	addi    r3,r1,STACK_FRAME_OVERHEAD
1656	bl      do_break
165712:	b       ret_from_except_lite
1658
1659
1660/* We have a page fault that hash_page could handle but HV refused
1661 * the PTE insertion
1662 */
166313:	bl	save_nvgprs
1664	mr	r5,r3
1665	addi	r3,r1,STACK_FRAME_OVERHEAD
1666	ld	r4,_DAR(r1)
1667	bl	low_hash_fault
1668	b	ret_from_except
1669
1670/*
1671 * We come here as a result of a DSI at a point where we don't want
1672 * to call hash_page, such as when we are accessing memory (possibly
1673 * user memory) inside a PMU interrupt that occurred while interrupts
1674 * were soft-disabled.  We want to invoke the exception handler for
1675 * the access, or panic if there isn't a handler.
1676 */
167777:	bl	save_nvgprs
1678	mr	r4,r3
1679	addi	r3,r1,STACK_FRAME_OVERHEAD
1680	li	r5,SIGSEGV
1681	bl	bad_page_fault
1682	b	ret_from_except
1683
1684	/* here we have a segment miss */
1685do_ste_alloc:
1686	bl	ste_allocate		/* try to insert stab entry */
1687	cmpdi	r3,0
1688	bne-	handle_page_fault
1689	b	fast_exception_return
1690
1691/*
1692 * r13 points to the PACA, r9 contains the saved CR,
1693 * r11 and r12 contain the saved SRR0 and SRR1.
1694 * r9 - r13 are saved in paca->exslb.
1695 * We assume we aren't going to take any exceptions during this procedure.
1696 * We assume (DAR >> 60) == 0xc.
1697 */
1698	.align	7
1699do_stab_bolted:
1700	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1701	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
1702	mfspr	r11,SPRN_DAR			/* ea */
1703
1704	/*
1705	 * check for bad kernel/user address
1706	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
1707	 */
1708	rldicr. r9,r11,4,(63 - 46 - 4)
1709	li	r9,0	/* VSID = 0 for bad address */
1710	bne-	0f
1711
1712	/*
1713	 * Calculate VSID:
1714	 * This is a kernel VSID; we take the context from the top of
1715	 * the range: context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
1716	 * Here we know that (ea >> 60) == 0xc
1717	 */
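	/*
	 * (Sketch of the calculation below, assuming the usual layout:
	 * the proto-VSID is (context << ESID_BITS) | esid -- the rldimi
	 * inserts the context above the ESID -- and ASM_VSID_SCRAMBLE
	 * then turns that into the final 256M-segment VSID.)
	 */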
1718	lis	r9,(MAX_USER_CONTEXT + 1)@ha
1719	addi	r9,r9,(MAX_USER_CONTEXT + 1)@l
1720
1721	srdi	r10,r11,SID_SHIFT
1722	rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
1723	ASM_VSID_SCRAMBLE(r10, r9, 256M)
1724	rldic	r9,r10,12,16	/* r9 = vsid << 12 */
1725
17260:
1727	/* Hash to the primary group */
1728	ld	r10,PACASTABVIRT(r13)
1729	srdi	r11,r11,SID_SHIFT
1730	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
1731
1732	/* Search the primary group for a free entry */
17331:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
1734	andi.	r11,r11,0x80
1735	beq	2f
1736	addi	r10,r10,16
1737	andi.	r11,r10,0x70
1738	bne	1b
1739
1740	/* Stick to only searching the primary group for now.		*/
1741	/* At least for now, we use a very simple random castout scheme */
1742	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
1743	mftb	r11
1744	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
1745	ori	r11,r11,0x10
1746
1747	/* r10 currently points to an ste one past the group of interest */
1748	/* make it point to the randomly selected entry			*/
1749	subi	r10,r10,128
1750	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
1751
1752	isync			/* mark the entry invalid		*/
1753	ld	r11,0(r10)
1754	rldicl	r11,r11,56,1	/* clear the valid bit */
1755	rotldi	r11,r11,8
1756	std	r11,0(r10)
1757	sync
1758
1759	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
1760	slbie	r11
1761
17622:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
1763	eieio
1764
1765	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
1766	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
1767	ori	r11,r11,0x90	/* Turn on valid and kp			*/
1768	std	r11,0(r10)	/* Put new entry back into the stab	*/
1769
1770	sync
1771
1772	/* All done -- return from exception. */
1773	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1774	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
1775
1776	andi.	r10,r12,MSR_RI
1777	beq-	unrecov_slb
1778
1779	mtcrf	0x80,r9			/* restore CR */
1780
1781	mfmsr	r10
1782	clrrdi	r10,r10,2
1783	mtmsrd	r10,1
1784
1785	mtspr	SPRN_SRR0,r11
1786	mtspr	SPRN_SRR1,r12
1787	ld	r9,PACA_EXSLB+EX_R9(r13)
1788	ld	r10,PACA_EXSLB+EX_R10(r13)
1789	ld	r11,PACA_EXSLB+EX_R11(r13)
1790	ld	r12,PACA_EXSLB+EX_R12(r13)
1791	ld	r13,PACA_EXSLB+EX_R13(r13)
1792	rfid
1793	b	.	/* prevent speculative execution */
1794