xref: /linux/arch/powerpc/kernel/exceptions-64s.S (revision 9307c29524502c21f0e8a6d96d850b2f5bc0bd9a)
1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/hw_irq.h>
16#include <asm/exception-64s.h>
17#include <asm/ptrace.h>
18
19/*
20 * We layout physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x17ff : pSeries Interrupt prologs
23 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
24 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
26 * 0x7000 - 0x7fff : FWNMI data area
27 * 0x8000 - 0x8fff : Initial (CPU0) segment table
28 * 0x9000 -        : Early init and support code
29 */
30	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
/*
 * SYSCALL_PSERIES_1: first stage of the 0xc00 system-call entry.
 * On CPUs with CPU_FTR_REAL_LE, r0 == 0x1ebe selects the fast
 * endian-switch path (label 1 in SYSCALL_PSERIES_3).  Otherwise the
 * old r13 is stashed in r9, the PACA pointer is loaded into r13 and
 * the userspace return address (SRR0) is captured in r11.  Label 0
 * is re-entered from SYSCALL_PSERIES_3 (via "bne 0b").
 */
31#define SYSCALL_PSERIES_1 					\
32BEGIN_FTR_SECTION						\
33	cmpdi	r0,0x1ebe ; 					\
34	beq-	1f ;						\
35END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
36	mr	r9,r13 ;					\
37	GET_PACA(r13) ;						\
38	mfspr	r11,SPRN_SRR0 ;					\
390:
40
/*
 * SYSCALL_PSERIES_2_RFID: second stage when entered with relocation
 * off.  Saves SRR1 in r12, builds the virtual address of
 * system_call_entry from PACAKBASE via LOAD_HANDLER, installs it and
 * the kernel MSR (PACAKMSR) into SRR0/SRR1, then rfid's into the
 * kernel with relocation on.
 */
41#define SYSCALL_PSERIES_2_RFID 					\
42	mfspr	r12,SPRN_SRR1 ;					\
43	ld	r10,PACAKBASE(r13) ; 				\
44	LOAD_HANDLER(r10, system_call_entry) ; 			\
45	mtspr	SPRN_SRR0,r10 ; 				\
46	ld	r10,PACAKMSR(r13) ;				\
47	mtspr	SPRN_SRR1,r10 ; 				\
48	rfid ; 							\
49	b	. ;	/* prevent speculative execution */
50
/*
 * SYSCALL_PSERIES_3: tail labels shared by both syscall paths.
 * Label 1: fast little/big-endian switch - flip MSR_LE in the saved
 * SRR1 and rfid straight back to userspace.
 * Label 2: if the caller was userspace (MSR_PR set) fall back to the
 * normal path at label 0; otherwise install new SRR0/SRR1/SDR1 from
 * r3-r5 and rfid (NOTE(review): kernel-mode special entry - confirm
 * the caller of label 2 against the full file/history).
 */
51#define SYSCALL_PSERIES_3					\
52	/* Fast LE/BE switch system call */			\
531:	mfspr	r12,SPRN_SRR1 ;					\
54	xori	r12,r12,MSR_LE ;				\
55	mtspr	SPRN_SRR1,r12 ;					\
56	rfid ;		/* return to userspace */		\
57	b	. ;						\
582:	mfspr	r12,SPRN_SRR1 ;					\
59	andi.	r12,r12,MSR_PR ;				\
60	bne	0b ;						\
61	mtspr	SPRN_SRR0,r3 ;					\
62	mtspr	SPRN_SRR1,r4 ;					\
63	mtspr	SPRN_SDR1,r5 ;					\
64	rfid ;							\
65	b	. ;	/* prevent speculative execution */
66
/*
 * SYSCALL_PSERIES_2_DIRECT: second stage for the relocation-on
 * (0x4c00) entry - no rfid needed since translation is already on.
 * Relocatable kernels cannot use a relative branch, so the handler
 * address is computed and reached via CTR; non-relocatable kernels
 * branch directly.  Both variants set MSR_RI before entering C-level
 * common code.
 */
67#if defined(CONFIG_RELOCATABLE)
68	/*
69	 * We can't branch directly; in the direct case we use LR
70	 * and system_call_entry restores LR.  (We thus need to move
71	 * LR to r10 in the RFID case too.)
72	 */
73#define SYSCALL_PSERIES_2_DIRECT				\
74	mflr	r10 ;						\
75	ld	r12,PACAKBASE(r13) ; 				\
76	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
77	mtctr	r12 ;						\
78	mfspr	r12,SPRN_SRR1 ;					\
79	/* Re-use of r13... No spare regs to do this */	\
80	li	r13,MSR_RI ;					\
81	mtmsrd 	r13,1 ;						\
82	GET_PACA(r13) ;	/* get r13 back */			\
83	bctr ;
84#else
85	/* We can branch directly */
86#define SYSCALL_PSERIES_2_DIRECT				\
87	mfspr	r12,SPRN_SRR1 ;					\
88	li	r10,MSR_RI ;					\
89	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
90	b	system_call_entry_direct ;
91#endif
92
93/*
94 * This is the start of the interrupt handlers for pSeries
95 * This code runs with relocation off.
96 * Code from here to __end_interrupts gets copied down to real
97 * address 0x100 when we are running a relocatable kernel.
98 * Therefore any relative branches in this section must only
99 * branch to labels in this section.
100 */
101	. = 0x100
102	.globl __start_interrupts
103__start_interrupts:
104
105	.globl system_reset_pSeries;
106system_reset_pSeries:
107	HMT_MEDIUM_PPR_DISCARD
108	SET_SCRATCH0(r13)
109#ifdef CONFIG_PPC_P7_NAP
110BEGIN_FTR_SECTION
111	/* Running native on arch 2.06 or later, check if we are
112	 * waking up from nap. We only handle no state loss and
113	 * supervisor state loss. We do -not- handle hypervisor
114	 * state loss at this time.
115	 */
116	mfspr	r13,SPRN_SRR1
	/* Extract the SRR1 wakeup-reason field (2 bits); zero means this
	 * system reset is not a powersave wakeup, so take the normal path. */
117	rlwinm.	r13,r13,47-31,30,31
118	beq	9f
119
120	/* waking up from powersave (nap) state */
121	cmpwi	cr1,r13,2
122	/* Total loss of HV state is fatal, we could try to use the
123	 * PIR to locate a PACA, then use an emergency stack etc...
124	 * but for now, let's just stay stuck here
125	 */
126	bgt	cr1,.
127	GET_PACA(r13)
128
129#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* Flag this hardware thread as being in the kernel before checking
	 * whether KVM wants it, so the two updates cannot race. */
130	li	r0,KVM_HWTHREAD_IN_KERNEL
131	stb	r0,HSTATE_HWTHREAD_STATE(r13)
132	/* Order setting hwthread_state vs. testing hwthread_req */
133	sync
134	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
135	cmpwi	r0,0
136	beq	1f
137	b	kvm_start_guest
1381:
139#endif
140
	/* cr1 still holds the wakeup-reason compare against 2 from above */
141	beq	cr1,2f
142	b	.power7_wakeup_noloss
1432:	b	.power7_wakeup_loss
1449:
145END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
146#endif /* CONFIG_PPC_P7_NAP */
147	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148				 NOTEST, 0x100)
149
150	. = 0x200
151machine_check_pSeries_1:
152	/* This is moved out of line as it can be patched by FW, but
153	 * some code path might still want to branch into the original
154	 * vector
155	 */
156	HMT_MEDIUM_PPR_DISCARD
157	SET_SCRATCH0(r13)		/* save r13 */
158	EXCEPTION_PROLOG_0(PACA_EXMC)
	/* Continue at the out-of-line body (machine_check_pSeries_0 below) */
159	b	machine_check_pSeries_0
160
161	. = 0x300
162	.globl data_access_pSeries
163data_access_pSeries:
164	HMT_MEDIUM_PPR_DISCARD
165	SET_SCRATCH0(r13)
	/* On CPUs without an SLB (STAB-based MMU) divert to the STAB check;
	 * data_access_not_stab is the return point from that path. */
166BEGIN_FTR_SECTION
167	b	data_access_check_stab
168data_access_not_stab:
169END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
170	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
171				 KVMTEST, 0x300)
172
173	. = 0x380
174	.globl data_access_slb_pSeries
175data_access_slb_pSeries:
176	HMT_MEDIUM_PPR_DISCARD
177	SET_SCRATCH0(r13)
178	EXCEPTION_PROLOG_0(PACA_EXSLB)
179	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	/* Save caller's r3 in the SLB save area, then pass the faulting
	 * data address (DAR) and saved SRR1 to .slb_miss_realmode in r3/r12 */
180	std	r3,PACA_EXSLB+EX_R3(r13)
181	mfspr	r3,SPRN_DAR
182#ifdef __DISABLED__
183	/* Keep that around for when we re-implement dynamic VSIDs */
184	cmpdi	r3,0
185	bge	slb_miss_user_pseries
186#endif /* __DISABLED__ */
187	mfspr	r12,SPRN_SRR1
188#ifndef CONFIG_RELOCATABLE
189	b	.slb_miss_realmode
190#else
191	/*
192	 * We can't just use a direct branch to .slb_miss_realmode
193	 * because the distance from here to there depends on where
194	 * the kernel ends up being put.
195	 */
196	mfctr	r11
197	ld	r10,PACAKBASE(r13)
198	LOAD_HANDLER(r10, .slb_miss_realmode)
199	mtctr	r10
200	bctr
201#endif
202
203	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
204
205	. = 0x480
206	.globl instruction_access_slb_pSeries
207instruction_access_slb_pSeries:
	/* Same shape as the 0x380 data SLB miss, but the faulting address
	 * is the instruction address (SRR0) rather than DAR. */
208	HMT_MEDIUM_PPR_DISCARD
209	SET_SCRATCH0(r13)
210	EXCEPTION_PROLOG_0(PACA_EXSLB)
211	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
212	std	r3,PACA_EXSLB+EX_R3(r13)
213	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
214#ifdef __DISABLED__
215	/* Keep that around for when we re-implement dynamic VSIDs */
216	cmpdi	r3,0
217	bge	slb_miss_user_pseries
218#endif /* __DISABLED__ */
219	mfspr	r12,SPRN_SRR1
220#ifndef CONFIG_RELOCATABLE
221	b	.slb_miss_realmode
222#else
223	mfctr	r11
224	ld	r10,PACAKBASE(r13)
225	LOAD_HANDLER(r10, .slb_miss_realmode)
226	mtctr	r10
227	bctr
228#endif
229
230	/* We open code these as we can't have a ". = x" (even with
231	 * x = "." within a feature section
232	 */
	/* External interrupt: HV-capable CPUs take it at 0x502 (EXC_HV with
	 * the HV soft-mask test); older CPUs use the 0x500 EXC_STD form. */
233	. = 0x500;
234	.globl hardware_interrupt_pSeries;
235	.globl hardware_interrupt_hv;
236hardware_interrupt_pSeries:
237hardware_interrupt_hv:
238	HMT_MEDIUM_PPR_DISCARD
239	BEGIN_FTR_SECTION
240		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
241					    EXC_HV, SOFTEN_TEST_HV)
242		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
243	FTR_SECTION_ELSE
244		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
245					    EXC_STD, SOFTEN_TEST_HV_201)
246		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
247	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
248
	/* Standard fixed-offset vectors 0x600-0xb00, each with its KVM
	 * entry test where a guest can take the interrupt. */
249	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
250	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
251
252	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
253	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
254
255	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
256	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
257
258	. = 0x900
259	.globl decrementer_pSeries
260decrementer_pSeries:
261	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
262
263	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
264
265	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
266	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
267
268	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
269	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
270
271	. = 0xc00
272	.globl	system_call_pSeries
273system_call_pSeries:
274	HMT_MEDIUM
275#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/* When KVM is built in, first check whether this sc came from a
	 * guest; KVMTEST needs the PACA and scratch regs, which are restored
	 * before falling into the normal syscall path. */
276	SET_SCRATCH0(r13)
277	GET_PACA(r13)
278	std	r9,PACA_EXGEN+EX_R9(r13)
279	std	r10,PACA_EXGEN+EX_R10(r13)
280	mfcr	r9
281	KVMTEST(0xc00)
282	GET_SCRATCH0(r13)
283#endif
	/* Relocation-off syscall path: stage 1, rfid into the kernel, tails */
284	SYSCALL_PSERIES_1
285	SYSCALL_PSERIES_2_RFID
286	SYSCALL_PSERIES_3
287	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
288
289	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
290	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
291
292	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
293	 * out of line to handle them
294	 */
	/* Each 0xe?? vector only has 0x20 bytes of space, so just save the
	 * scratch area and branch to the out-of-line body further down. */
295	. = 0xe00
296hv_exception_trampoline:
297	SET_SCRATCH0(r13)
298	EXCEPTION_PROLOG_0(PACA_EXGEN)
299	b	h_data_storage_hv
300
301	. = 0xe20
302	SET_SCRATCH0(r13)
303	EXCEPTION_PROLOG_0(PACA_EXGEN)
304	b	h_instr_storage_hv
305
306	. = 0xe40
307	SET_SCRATCH0(r13)
308	EXCEPTION_PROLOG_0(PACA_EXGEN)
309	b	emulation_assist_hv
310
311	. = 0xe60
312	SET_SCRATCH0(r13)
313	EXCEPTION_PROLOG_0(PACA_EXGEN)
314	b	hmi_exception_hv
315
316	. = 0xe80
317	SET_SCRATCH0(r13)
318	EXCEPTION_PROLOG_0(PACA_EXGEN)
319	b	h_doorbell_hv
320
321	/* We need to deal with the Altivec unavailable exception
322	 * here which is at 0xf20, thus in the middle of the
323	 * prolog code of the PerformanceMonitor one. A little
324	 * trickery is thus necessary
325	 */
326performance_monitor_pSeries_1:
327	. = 0xf00
328	SET_SCRATCH0(r13)
329	EXCEPTION_PROLOG_0(PACA_EXGEN)
330	b	performance_monitor_pSeries
331
332altivec_unavailable_pSeries_1:
333	. = 0xf20
334	SET_SCRATCH0(r13)
335	EXCEPTION_PROLOG_0(PACA_EXGEN)
336	b	altivec_unavailable_pSeries
337
338vsx_unavailable_pSeries_1:
339	. = 0xf40
340	SET_SCRATCH0(r13)
341	EXCEPTION_PROLOG_0(PACA_EXGEN)
342	b	vsx_unavailable_pSeries
343
344	. = 0xf60
345	SET_SCRATCH0(r13)
346	EXCEPTION_PROLOG_0(PACA_EXGEN)
347	b	tm_unavailable_pSeries
348
349#ifdef CONFIG_CBE_RAS
350	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
351	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
352#endif /* CONFIG_CBE_RAS */
353
354	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
355	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
356
357	. = 0x1500
358	.global denorm_exception_hv
359denorm_exception_hv:
360	HMT_MEDIUM_PPR_DISCARD
	/* Open-coded HV prolog: save r11/r12 and the original r13 (from
	 * HSCRATCH0) in the EXGEN area, plus CR in r9, before deciding
	 * whether to take the fast denorm-assist path. */
361	mtspr	SPRN_SPRG_HSCRATCH0,r13
362	EXCEPTION_PROLOG_0(PACA_EXGEN)
363	std	r11,PACA_EXGEN+EX_R11(r13)
364	std	r12,PACA_EXGEN+EX_R12(r13)
365	mfspr	r9,SPRN_SPRG_HSCRATCH0
366	std	r9,PACA_EXGEN+EX_R13(r13)
367	mfcr	r9
368
369#ifdef CONFIG_PPC_DENORMALISATION
370	mfspr	r10,SPRN_HSRR1
371	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
372	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
373	addi	r11,r11,-4		/* HSRR0 is next instruction */
374	bne+	denorm_assist
375#endif
376
377	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
378	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
379
380#ifdef CONFIG_CBE_RAS
381	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
382	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
383#endif /* CONFIG_CBE_RAS */
384
385	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
386	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
387
388#ifdef CONFIG_CBE_RAS
389	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
390	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
391#else
392	. = 0x1800
393#endif /* CONFIG_CBE_RAS */
394
395
396/*** Out of line interrupts support ***/
397
398	.align	7
399	/* moved from 0x200 */
	/* machine_check_fwnmi is the FWNMI (firmware-assisted NMI) entry;
	 * it shares machine_check_pSeries_0 with the 0x200 vector above. */
400machine_check_pSeries:
401	.globl machine_check_fwnmi
402machine_check_fwnmi:
403	HMT_MEDIUM_PPR_DISCARD
404	SET_SCRATCH0(r13)		/* save r13 */
405	EXCEPTION_PROLOG_0(PACA_EXMC)
406machine_check_pSeries_0:
407	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
408	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
409	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
410
411	/* moved from 0x300 */
	/* STAB-based CPUs: decide whether this 0x300 fault is a bolted
	 * segment-table miss (selector 0x2c) that must be handled in
	 * .do_stab_bolted, or a normal data access fault. */
412data_access_check_stab:
413	GET_PACA(r13)
414	std	r9,PACA_EXSLB+EX_R9(r13)
415	std	r10,PACA_EXSLB+EX_R10(r13)
416	mfspr	r10,SPRN_DAR
417	mfspr	r9,SPRN_DSISR
418	srdi	r10,r10,60
419	rlwimi	r10,r9,16,0x20
420#ifdef CONFIG_KVM_BOOK3S_PR
421	lbz	r9,HSTATE_IN_GUEST(r13)
422	rlwimi	r10,r9,8,0x300
423#endif
424	mfcr	r9
425	cmpwi	r10,0x2c
426	beq	do_stab_bolted_pSeries
	/* Not a bolted STAB miss: restore r9/r10/CR and rejoin the normal
	 * data access path. */
427	mtcrf	0x80,r9
428	ld	r9,PACA_EXSLB+EX_R9(r13)
429	ld	r10,PACA_EXSLB+EX_R10(r13)
430	b	data_access_not_stab
431do_stab_bolted_pSeries:
432	std	r11,PACA_EXSLB+EX_R11(r13)
433	std	r12,PACA_EXSLB+EX_R12(r13)
434	GET_SCRATCH0(r10)
435	std	r10,PACA_EXSLB+EX_R13(r13)
436	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
437
	/* Out-of-line KVM guest-entry handlers for the vectors above */
438	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
439	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
440	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
441	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
442	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
443	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
444
445#ifdef CONFIG_PPC_DENORMALISATION
	/* Fast path for the 0x1500 denormalisation assist: re-copy each FP/VSX
	 * register onto itself (which normalises it in hardware), then return
	 * to the faulting instruction with HRFID.  Which register file is
	 * touched depends on the CPU generation (feature sections below). */
446denorm_assist:
447BEGIN_FTR_SECTION
448/*
449 * To denormalise we need to move a copy of the register to itself.
450 * For POWER6 do that here for all FP regs.
451 */
452	mfmsr	r10
453	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
454	xori	r10,r10,(MSR_FE0|MSR_FE1)
455	mtmsrd	r10
456	sync
457
458#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
459#define FMR4(n)  FMR2(n) ; FMR2(n+2)
460#define FMR8(n)  FMR4(n) ; FMR4(n+4)
461#define FMR16(n) FMR8(n) ; FMR8(n+8)
462#define FMR32(n) FMR16(n) ; FMR16(n+16)
463	FMR32(0)
464
465FTR_SECTION_ELSE
466/*
467 * To denormalise we need to move a copy of the register to itself.
468 * For POWER7 do that here for the first 32 VSX registers only.
469 */
470	mfmsr	r10
471	oris	r10,r10,MSR_VSX@h
472	mtmsrd	r10
473	sync
474
475#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
476#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
477#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
478#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
479#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
480	XVCPSGNDP32(0)
481
482ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
483
484BEGIN_FTR_SECTION
485	b	denorm_done
486END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
487/*
488 * To denormalise we need to move a copy of the register to itself.
489 * For POWER8 we need to do that for all 64 VSX registers
490 */
491	XVCPSGNDP32(32)
492denorm_done:
	/* Restore HSRR0 (already backed up 4 bytes to re-execute the faulting
	 * instruction), then CR and the saved GPRs, and return from HV. */
493	mtspr	SPRN_HSRR0,r11
494	mtcrf	0x80,r9
495	ld	r9,PACA_EXGEN+EX_R9(r13)
496	RESTORE_PPR_PACA(PACA_EXGEN, r10)
497	ld	r10,PACA_EXGEN+EX_R10(r13)
498	ld	r11,PACA_EXGEN+EX_R11(r13)
499	ld	r12,PACA_EXGEN+EX_R12(r13)
500	ld	r13,PACA_EXGEN+EX_R13(r13)
501	HRFID
502	b	.
503#endif
504
505	.align	7
506	/* moved from 0xe00 */
	/* Out-of-line bodies for the 0xe?? HV trampolines above */
507	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
508	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
509	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
510	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
511	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
512	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
513	STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
514	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
515	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
516	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
517
518	/* moved from 0xf00 */
	/* Out-of-line bodies for the 0xf?? trampolines above */
519	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
520	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
521	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
522	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
523	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
524	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
525	STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
526	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
527
528/*
529 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
530 * - If it was a decrementer interrupt, we bump the dec to max and and return.
531 * - If it was a doorbell we return immediately since doorbells are edge
532 *   triggered and won't automatically refire.
533 * - else we hard disable and return.
534 * This is called with r10 containing the value to OR to the paca field.
535 * (_H is empty for the EXC_STD variant, "H" for the EXC_HV variant; it
536 * selects SRR vs HSRR and rfid vs hrfid.)
537 */
536#define MASKED_INTERRUPT(_H)				\
537masked_##_H##interrupt:					\
538	std	r11,PACA_EXGEN+EX_R11(r13);		\
539	lbz	r11,PACAIRQHAPPENED(r13);		\
540	or	r11,r11,r10;				\
541	stb	r11,PACAIRQHAPPENED(r13);		\
542	cmpwi	r10,PACA_IRQ_DEC;			\
543	bne	1f;					\
544	lis	r10,0x7fff;				\
545	ori	r10,r10,0xffff;				\
546	mtspr	SPRN_DEC,r10;				\
547	b	2f;					\
5481:	cmpwi	r10,PACA_IRQ_DBELL;			\
549	beq	2f;					\
550	mfspr	r10,SPRN_##_H##SRR1;			\
551	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
552	rotldi	r10,r10,16;				\
553	mtspr	SPRN_##_H##SRR1,r10;			\
5542:	mtcrf	0x80,r9;				\
555	ld	r9,PACA_EXGEN+EX_R9(r13);		\
556	ld	r10,PACA_EXGEN+EX_R10(r13);		\
557	ld	r11,PACA_EXGEN+EX_R11(r13);		\
558	GET_SCRATCH0(r13);				\
559	##_H##rfid;					\
560	b	.
561
562	MASKED_INTERRUPT()
563	MASKED_INTERRUPT(H)
564
565/*
566 * Called from arch_local_irq_enable when an interrupt needs
567 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
568 * which kind of interrupt. MSR:EE is already off. We generate a
569 * stackframe like if a real interrupt had happened.
570 *
571 * Note: While MSR:EE is off, we need to make sure that _MSR
572 * in the generated frame has EE set to 1 or the exception
573 * handler will not properly re-enable them.
574 */
575_GLOBAL(__replay_interrupt)
576	/* We are going to jump to the exception common code which
577	 * will retrieve various register values from the PACA which
578	 * we don't give a damn about, so we don't bother storing them.
579	 */
	/* Fake the state the common code expects: r12 = MSR (with EE forced
	 * on, see note above), r11 = return address, r9 = CR. */
580	mfmsr	r12
581	mflr	r11
582	mfcr	r9
583	ori	r12,r12,MSR_EE
584	cmpwi	r3,0x900
585	beq	decrementer_common
586	cmpwi	r3,0x500
587	beq	hardware_interrupt_common
	/* Doorbells: HV-mode CPUs replay 0xe80, others 0xa00 */
588BEGIN_FTR_SECTION
589	cmpwi	r3,0xe80
590	beq	h_doorbell_common
591FTR_SECTION_ELSE
592	cmpwi	r3,0xa00
593	beq	doorbell_super_common
594ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
595	blr
596
597#ifdef CONFIG_PPC_PSERIES
598/*
599 * Vectors for the FWNMI option.  Share common code.
600 */
601	.globl system_reset_fwnmi
602      .align 7
603system_reset_fwnmi:
604	HMT_MEDIUM_PPR_DISCARD
605	SET_SCRATCH0(r13)		/* save r13 */
	/* Same common body as the 0x100 vector; no nap-wakeup handling here */
606	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
607				 NOTEST, 0x100)
608
609#endif /* CONFIG_PPC_PSERIES */
610
611#ifdef __DISABLED__
612/*
613 * This is used for when the SLB miss handler has to go virtual,
614 * which doesn't happen for now anymore but will once we re-implement
615 * dynamic VSIDs for shared page tables
616 */
	/* NOTE: dead code (__DISABLED__). Copies the saved state from the
	 * EXSLB area to EXGEN, then rfid's to the virtual-mode handler
	 * slb_miss_user_common with IR/DR/RI enabled. */
617slb_miss_user_pseries:
618	std	r10,PACA_EXGEN+EX_R10(r13)
619	std	r11,PACA_EXGEN+EX_R11(r13)
620	std	r12,PACA_EXGEN+EX_R12(r13)
621	GET_SCRATCH0(r10)
622	ld	r11,PACA_EXSLB+EX_R9(r13)
623	ld	r12,PACA_EXSLB+EX_R3(r13)
624	std	r10,PACA_EXGEN+EX_R13(r13)
625	std	r11,PACA_EXGEN+EX_R9(r13)
626	std	r12,PACA_EXGEN+EX_R3(r13)
627	clrrdi	r12,r13,32
628	mfmsr	r10
629	mfspr	r11,SRR0			/* save SRR0 */
630	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
631	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
632	mtspr	SRR0,r12
633	mfspr	r12,SRR1			/* and SRR1 */
634	mtspr	SRR1,r10
635	rfid
636	b	.				/* prevent spec. execution */
637#endif /* __DISABLED__ */
638
639/*
640 * Code from here down to __end_handlers is invoked from the
641 * exception prologs above.  Because the prologs assemble the
642 * addresses of these handlers using the LOAD_HANDLER macro,
643 * which uses an ori instruction, these handlers must be in
644 * the first 64k of the kernel image.
645 */
646
647/*** Common interrupt handlers ***/
648
649	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
650
651	/*
652	 * Machine check is different because we use a different
653	 * save area: PACA_EXMC instead of PACA_EXGEN.
654	 *
655	 * Save DAR/DSISR in the machine-check save area (PACA_EXMC),
656	 * not PACA_EXGEN: a machine check can interrupt any other
657	 * exception while it is still using the EXGEN area, and saving
658	 * here through EXGEN would corrupt that exception's state.
659	 * Then build the pt_regs frame, copy DAR/DSISR into it and
660	 * call the C handler.
661	 */
655	.align	7
656	.globl machine_check_common
machine_check_common:

	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXMC+EX_DAR(r13)	/* was PACA_EXGEN: wrong save area */
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXMC+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	ld	r3,PACA_EXMC+EX_DAR(r13)
	lwz	r4,PACA_EXMC+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except
674
	/* Common (virtual-mode) bodies for the simple vectors; ASYNC variants
	 * are for interrupts replayable via __replay_interrupt. */
675	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
676	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
677	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
678#ifdef CONFIG_PPC_DOORBELL
679	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
680#else
681	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
682#endif
683	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
684	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
685	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
686	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
687	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
688#ifdef CONFIG_PPC_DOORBELL
689	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
690#else
691	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
692#endif
693	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
694	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
695	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
696#ifdef CONFIG_ALTIVEC
697	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
698#else
699	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
700#endif
701#ifdef CONFIG_CBE_RAS
702	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
703	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
704	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
705#endif /* CONFIG_CBE_RAS */
706
707	/*
708	 * Relocation-on interrupts: A subset of the interrupts can be delivered
709	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
710	 * it.  Addresses are the same as the original interrupt addresses, but
711	 * offset by 0xc000000000004000.
712	 * It's impossible to receive interrupts below 0x300 via this mechanism.
713	 * KVM: None of these traps are from the guest ; anything that escalated
714	 * to HV=1 from HV=0 is delivered via real mode handlers.
715	 */
716
717	/*
718	 * This uses the standard macro, since the original 0x300 vector
719	 * only has extra guff for STAB-based processors -- which never
720	 * come here.
721	 */
722	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
723	. = 0x4380
724	.globl data_access_slb_relon_pSeries
	/* Relocation-on mirror of the 0x380 data SLB miss: r3 = DAR,
	 * r12 = SRR1, then into .slb_miss_realmode (no KVM test needed). */
725data_access_slb_relon_pSeries:
726	SET_SCRATCH0(r13)
727	EXCEPTION_PROLOG_0(PACA_EXSLB)
728	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
729	std	r3,PACA_EXSLB+EX_R3(r13)
730	mfspr	r3,SPRN_DAR
731	mfspr	r12,SPRN_SRR1
732#ifndef CONFIG_RELOCATABLE
733	b	.slb_miss_realmode
734#else
735	/*
736	 * We can't just use a direct branch to .slb_miss_realmode
737	 * because the distance from here to there depends on where
738	 * the kernel ends up being put.
739	 */
740	mfctr	r11
741	ld	r10,PACAKBASE(r13)
742	LOAD_HANDLER(r10, .slb_miss_realmode)
743	mtctr	r10
744	bctr
745#endif
746
747	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
748	. = 0x4480
749	.globl instruction_access_slb_relon_pSeries
	/* Relocation-on mirror of the 0x480 instruction SLB miss */
750instruction_access_slb_relon_pSeries:
751	SET_SCRATCH0(r13)
752	EXCEPTION_PROLOG_0(PACA_EXSLB)
753	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
754	std	r3,PACA_EXSLB+EX_R3(r13)
755	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
756	mfspr	r12,SPRN_SRR1
757#ifndef CONFIG_RELOCATABLE
758	b	.slb_miss_realmode
759#else
760	mfctr	r11
761	ld	r10,PACAKBASE(r13)
762	LOAD_HANDLER(r10, .slb_miss_realmode)
763	mtctr	r10
764	bctr
765#endif
766
767	. = 0x4500
768	.globl hardware_interrupt_relon_pSeries;
769	.globl hardware_interrupt_relon_hv;
770hardware_interrupt_relon_pSeries:
771hardware_interrupt_relon_hv:
	/* Relocation-on external interrupt, HV vs non-HV variants */
772	BEGIN_FTR_SECTION
773		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
774	FTR_SECTION_ELSE
775		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
776	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	/* Relocation-on mirrors of the simple 0x600-0xb00 vectors */
777	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
778	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
779	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
780	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
781	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
782	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
783	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
784
785	. = 0x4c00
786	.globl system_call_relon_pSeries
	/* Relocation-on syscall entry: same stage 1/3 as 0xc00 but the
	 * middle stage branches directly instead of rfid'ing. */
787system_call_relon_pSeries:
788	HMT_MEDIUM
789	SYSCALL_PSERIES_1
790	SYSCALL_PSERIES_2_DIRECT
791	SYSCALL_PSERIES_3
792
793	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
794
	/* Relocation-on 0x4e??/0x4f?? trampolines mirroring the real-mode
	 * 0xe??/0xf?? ones: save scratch, then branch to the _relon bodies. */
795	. = 0x4e00
796	SET_SCRATCH0(r13)
797	EXCEPTION_PROLOG_0(PACA_EXGEN)
798	b	h_data_storage_relon_hv
799
800	. = 0x4e20
801	SET_SCRATCH0(r13)
802	EXCEPTION_PROLOG_0(PACA_EXGEN)
803	b	h_instr_storage_relon_hv
804
805	. = 0x4e40
806	SET_SCRATCH0(r13)
807	EXCEPTION_PROLOG_0(PACA_EXGEN)
808	b	emulation_assist_relon_hv
809
810	. = 0x4e60
811	SET_SCRATCH0(r13)
812	EXCEPTION_PROLOG_0(PACA_EXGEN)
813	b	hmi_exception_relon_hv
814
815	. = 0x4e80
816	SET_SCRATCH0(r13)
817	EXCEPTION_PROLOG_0(PACA_EXGEN)
818	b	h_doorbell_relon_hv
819
820performance_monitor_relon_pSeries_1:
821	. = 0x4f00
822	SET_SCRATCH0(r13)
823	EXCEPTION_PROLOG_0(PACA_EXGEN)
824	b	performance_monitor_relon_pSeries
825
826altivec_unavailable_relon_pSeries_1:
827	. = 0x4f20
828	SET_SCRATCH0(r13)
829	EXCEPTION_PROLOG_0(PACA_EXGEN)
830	b	altivec_unavailable_relon_pSeries
831
832vsx_unavailable_relon_pSeries_1:
833	. = 0x4f40
834	SET_SCRATCH0(r13)
835	EXCEPTION_PROLOG_0(PACA_EXGEN)
836	b	vsx_unavailable_relon_pSeries
837
838tm_unavailable_relon_pSeries_1:
839	. = 0x4f60
840	SET_SCRATCH0(r13)
841	EXCEPTION_PROLOG_0(PACA_EXGEN)
842	b	tm_unavailable_relon_pSeries
843
844	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
845#ifdef CONFIG_PPC_DENORMALISATION
846	. = 0x5500
847	b	denorm_exception_hv
848#endif
849	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
850
851	/* Other future vectors */
852	.align	7
853	.globl	__end_interrupts
854__end_interrupts:

856	.align	7
	/* Targets of the two syscall second stages; both funnel into the
	 * generic system_call_common code. */
857system_call_entry_direct:
858#if defined(CONFIG_RELOCATABLE)
859	/* The first level prologue may have used LR to get here, saving
860	 * orig in r10.  To save hacking/ifdeffing common code, restore here.
861	 */
862	mtlr	r10
863#endif
864system_call_entry:
865	b	system_call_common
866
867ppc64_runlatch_on_trampoline:
868	b	.__ppc64_runlatch_on
869
870/*
871 * Here we have detected that the kernel stack pointer is bad.
872 * R9 contains the saved CR, r13 points to the paca,
873 * r10 contains the (bad) kernel stack pointer,
874 * r11 and r12 contain the saved SRR0 and SRR1.
875 * We switch to using an emergency stack, save the registers there,
876 * and call kernel_bad_stack(), which panics.
877 * (r3 points at the exception save area holding EX_R3/EX_R9..EX_R13.)
878 */
878bad_stack:
879	ld	r1,PACAEMERGSP(r13)
880	subi	r1,r1,64+INT_FRAME_SIZE
881	std	r9,_CCR(r1)
882	std	r10,GPR1(r1)
883	std	r11,_NIP(r1)
884	std	r12,_MSR(r1)
885	mfspr	r11,SPRN_DAR
886	mfspr	r12,SPRN_DSISR
887	std	r11,_DAR(r1)
888	std	r12,_DSISR(r1)
889	mflr	r10
890	mfctr	r11
891	mfxer	r12
892	std	r10,_LINK(r1)
893	std	r11,_CTR(r1)
894	std	r12,_XER(r1)
895	SAVE_GPR(0,r1)
896	SAVE_GPR(2,r1)
	/* Recover the original r3/r9..r13 from the paca save area at r3 */
897	ld	r10,EX_R3(r3)
898	std	r10,GPR3(r1)
899	SAVE_GPR(4,r1)
900	SAVE_4GPRS(5,r1)
901	ld	r9,EX_R9(r3)
902	ld	r10,EX_R10(r3)
903	SAVE_2GPRS(9,r1)
904	ld	r9,EX_R11(r3)
905	ld	r10,EX_R12(r3)
906	ld	r11,EX_R13(r3)
907	std	r9,GPR11(r1)
908	std	r10,GPR12(r1)
909	std	r11,GPR13(r1)
910BEGIN_FTR_SECTION
911	ld	r10,EX_CFAR(r3)
912	std	r10,ORIG_GPR3(r1)
913END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
914	SAVE_8GPRS(14,r1)
915	SAVE_10GPRS(22,r1)
916	lhz	r12,PACA_TRAP_SAVE(r13)
917	std	r12,_TRAP(r1)
	/* Terminate the stack-frame back chain and set up the marker */
918	addi	r11,r1,INT_FRAME_SIZE
919	std	r11,0(r1)
920	li	r12,0
921	std	r12,0(r11)
922	ld	r2,PACATOC(r13)
923	ld	r11,exception_marker@toc(r2)
924	std	r12,RESULT(r1)
925	std	r11,STACK_FRAME_OVERHEAD-16(r1)
	/* kernel_bad_stack never returns; loop in case it does */
9261:	addi	r3,r1,STACK_FRAME_OVERHEAD
927	bl	.kernel_bad_stack
928	b	1b
929
930/*
931 * Here r13 points to the paca, r9 contains the saved CR,
932 * SRR0 and SRR1 are saved in r11 and r12,
933 * r9 - r13 are saved in paca->exgen.
934 */
935	.align	7
936	.globl data_access_common
data_access_common:
	/* Capture DAR/DSISR before the common prolog can clobber SPRs, then
	 * hand off to .do_hash_page with r3 = address, r4 = DSISR, r5 = trap */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */
949
950	.align  7
951	.globl  h_data_storage_common
	/* Hypervisor data storage (0xe02): record HDAR/HDSISR and report as
	 * an unknown exception. */
h_data_storage_common:
	mfspr   r10,SPRN_HDAR
	std     r10,PACA_EXGEN+EX_DAR(r13)
	mfspr   r10,SPRN_HDSISR
	stw     r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl      .save_nvgprs
	DISABLE_INTS
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      .unknown_exception
	b       .ret_from_except
963
964	.align	7
965	.globl instruction_access_common
	/* Instruction access fault: faulting address is the saved NIP;
	 * the fault reason bits are extracted from the saved SRR1 (r12). */
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

975	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
976
977/*
978 * Here is the common SLB miss user that is used when going to virtual
979 * mode for SLB misses, that is currently not used
980 */
981#ifdef __DISABLED__
	/* NOTE: dead code (__DISABLED__), kept for future dynamic-VSID work */
982	.align	7
983	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	/* RI clear in the saved MSR means we cannot safely return */
	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	/* SLB allocation failed: deliver as a 0x380 page fault with no DSISR */
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */
1039
1040
1041	.align	7
1042	.globl alignment_common
	/* Alignment interrupt (0x600): save DAR/DSISR into the frame and
	 * call the C alignment fixup/signal code. */
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except
1058
1059	.align	7
1060	.globl program_check_common
	/* Program interrupt (0x700): straight to the C handler */
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception
	b	.ret_from_except
1068
	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	/*
	 * FP unavailable interrupt (0x800).  A fault from kernel mode is
	 * fatal (the kernel must not rely on FP here); a fault from user
	 * mode is handed to .load_up_fpu, with a detour to the TM code
	 * first if the task was in a transaction.
	 */
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	/* Came from the kernel: report and BUG. */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)	/* r12 = saved SRR1 */
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	.load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.fp_unavailable_tm
	b	.ret_from_except
#endif
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	/*
	 * Altivec unavailable interrupt (0xf20).  On Altivec-capable
	 * CPUs a user-mode fault goes to .load_up_altivec (with a TM
	 * detour if the task was transactional); kernel-mode faults and
	 * non-Altivec CPUs fall through to the generic C handler.
	 */
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f			/* from kernel: use the C handler below */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)	/* r12 = saved SRR1 */
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	.load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except
1133
	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	/*
	 * VSX unavailable interrupt (0xf40).  On VSX-capable CPUs a
	 * user-mode fault is sent to .load_up_vsx (with a TM detour if
	 * the task was transactional); kernel-mode faults and non-VSX
	 * CPUs fall through to the generic C handler.
	 */
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f			/* from kernel: use the C handler below */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)	/* r12 = saved SRR1 */
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	.load_up_vsx		/* plain branch (not bl): does not return here */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except
1167
	.align	7
	.globl tm_unavailable_common
tm_unavailable_common:
	/*
	 * TM unavailable interrupt (0xf60).  Unlike the fp/altivec/vsx
	 * handlers above there is no load-up fast path; just call the C
	 * handler.
	 */
	EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.tm_unavailable_exception
	b	.ret_from_except
1177
	.align	7
	.globl	__end_handlers
__end_handlers:

	/* Equivalents to the above handlers for relocation-on interrupt vectors */
	/* (out-of-line "OOL" bodies; each HV vector gets a matching KVM handler) */
	STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
	STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
	STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)

	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
1198
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	.= 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/* Space for CPU0's segment table */
	.balign 4096
	.globl initial_stab
initial_stab:
	.space	4096		/* one 4K page (0x8000-0x8fff in the memory map above) */
1219
#ifdef CONFIG_PPC_POWERNV
_GLOBAL(opal_mc_secondary_handler)
	/*
	 * OPAL machine-check entry.  r3 carries the address of the OPAL
	 * machine-check event (the OPAL_MC_* fields are read off it):
	 * record the event pointer in the PACA, load SRR0/SRR1 from the
	 * event, restore r3/r13 and join the common pSeries machine
	 * check path.
	 */
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)			/* stash r13 while we use it for the PACA */
	GET_PACA(r13)
	clrldi	r3,r3,2				/* clear the top two bits of the event address */
	tovirt(r3,r3)
	std	r3,PACA_OPAL_MC_EVT(r13)	/* remember the event pointer */
	ld	r13,OPAL_MC_SRR0(r3)
	mtspr	SPRN_SRR0,r13			/* SRR0/SRR1 come from the event */
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)		/* r3 value saved in the event */
	GET_SCRATCH0(r13)
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */
1236
1237
1238/*
1239 * r13 points to the PACA, r9 contains the saved CR,
1240 * r12 contain the saved SRR1, SRR0 is still ready for return
1241 * r3 has the faulting address
1242 * r9 - r13 are saved in paca->exslb.
1243 * r3 is saved in paca->slb_r3
1244 * We assume we aren't going to take any exceptions during this procedure.
1245 */
1246_GLOBAL(slb_miss_realmode)
1247	mflr	r10
1248#ifdef CONFIG_RELOCATABLE
1249	mtctr	r11
1250#endif
1251
1252	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1253	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
1254
1255	bl	.slb_allocate_realmode
1256
1257	/* All done -- return from exception. */
1258
1259	ld	r10,PACA_EXSLB+EX_LR(r13)
1260	ld	r3,PACA_EXSLB+EX_R3(r13)
1261	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1262
1263	mtlr	r10
1264
1265	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
1266	beq-	2f
1267
1268.machine	push
1269.machine	"power4"
1270	mtcrf	0x80,r9
1271	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1272.machine	pop
1273
1274	RESTORE_PPR_PACA(PACA_EXSLB, r9)
1275	ld	r9,PACA_EXSLB+EX_R9(r13)
1276	ld	r10,PACA_EXSLB+EX_R10(r13)
1277	ld	r11,PACA_EXSLB+EX_R11(r13)
1278	ld	r12,PACA_EXSLB+EX_R12(r13)
1279	ld	r13,PACA_EXSLB+EX_R13(r13)
1280	rfid
1281	b	.	/* prevent speculative execution */
1282
12832:	mfspr	r11,SPRN_SRR0
1284	ld	r10,PACAKBASE(r13)
1285	LOAD_HANDLER(r10,unrecov_slb)
1286	mtspr	SPRN_SRR0,r10
1287	ld	r10,PACAKMSR(r13)
1288	mtspr	SPRN_SRR1,r10
1289	rfid
1290	b	.
1291
unrecov_slb:
	/* Unrecoverable SLB miss (MSR_RI was clear): report and spin. */
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b			/* .unrecoverable_exception is not expected to return */
1299
1300
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	/*
	 * 970 nap fixup.  NOTE(review): the caller is expected to supply
	 * r9 = current TI_LOCAL_FLAGS value, r10 = flag bits to clear and
	 * r11 = thread_info pointer — confirm against the call site.
	 * Clears the flag bits, then rewrites the saved NIP with the
	 * saved LR so the interrupted context resumes as if it had blr'd.
	 */
	andc	r9,r9,r10		/* clear the flag bits from local flags */
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif
1309
1310/*
1311 * Hash table stuff
1312 */
1313	.align	7
1314_STATIC(do_hash_page)
1315	std	r3,_DAR(r1)
1316	std	r4,_DSISR(r1)
1317
1318	andis.	r0,r4,0xa410		/* weird error? */
1319	bne-	handle_page_fault	/* if not, try to insert a HPTE */
1320	andis.  r0,r4,DSISR_DABRMATCH@h
1321	bne-    handle_dabr_fault
1322
1323BEGIN_FTR_SECTION
1324	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
1325	bne-	do_ste_alloc		/* If so handle it */
1326END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1327
1328	CURRENT_THREAD_INFO(r11, r1)
1329	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
1330	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
1331	bne	77f			/* then don't call hash_page now */
1332	/*
1333	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1334	 * accessing a userspace segment (even from the kernel). We assume
1335	 * kernel addresses always have the high bit set.
1336	 */
1337	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
1338	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
1339	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
1340	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
1341	ori	r4,r4,1			/* add _PAGE_PRESENT */
1342	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
1343
1344	/*
1345	 * r3 contains the faulting address
1346	 * r4 contains the required access permissions
1347	 * r5 contains the trap number
1348	 *
1349	 * at return r3 = 0 for success, 1 for page fault, negative for error
1350	 */
1351	bl	.hash_page		/* build HPTE if possible */
1352	cmpdi	r3,0			/* see if hash_page succeeded */
1353
1354	/* Success */
1355	beq	fast_exc_return_irq	/* Return from exception on success */
1356
1357	/* Error */
1358	blt-	13f
1359
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)		/* r4 = faulting address */
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	12f			/* handled: lightweight exception return */
	bl	.save_nvgprs
	mr	r5,r3			/* r5 = do_page_fault() result (signal) */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except
1374
/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld      r4,_DAR(r1)		/* r4 = breakpoint address */
	ld      r5,_DSISR(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      .do_break
12:	b       .ret_from_except_lite	/* label 12 also used by handle_page_fault */
1383
1384
1385/* We have a page fault that hash_page could handle but HV refused
1386 * the PTE insertion
1387 */
138813:	bl	.save_nvgprs
1389	mr	r5,r3
1390	addi	r3,r1,STACK_FRAME_OVERHEAD
1391	ld	r4,_DAR(r1)
1392	bl	.low_hash_fault
1393	b	.ret_from_except
1394
1395/*
1396 * We come here as a result of a DSI at a point where we don't want
1397 * to call hash_page, such as when we are accessing memory (possibly
1398 * user memory) inside a PMU interrupt that occurred while interrupts
1399 * were soft-disabled.  We want to invoke the exception handler for
1400 * the access, or panic if there isn't a handler.
1401 */
140277:	bl	.save_nvgprs
1403	mr	r4,r3
1404	addi	r3,r1,STACK_FRAME_OVERHEAD
1405	li	r5,SIGSEGV
1406	bl	.bad_page_fault
1407	b	.ret_from_except
1408
	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault	/* couldn't: treat it as a page fault */
	b	fast_exception_return
1415
1416/*
1417 * r13 points to the PACA, r9 contains the saved CR,
1418 * r11 and r12 contain the saved SRR0 and SRR1.
1419 * r9 - r13 are saved in paca->exslb.
1420 * We assume we aren't going to take any exceptions during this procedure.
1421 * We assume (DAR >> 60) == 0xc.
1422 */
1423	.align	7
1424_GLOBAL(do_stab_bolted)
1425	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1426	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
1427	mfspr	r11,SPRN_DAR			/* ea */
1428
1429	/*
1430	 * check for bad kernel/user address
1431	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
1432	 */
1433	rldicr. r9,r11,4,(63 - 46 - 4)
1434	li	r9,0	/* VSID = 0 for bad address */
1435	bne-	0f
1436
1437	/*
1438	 * Calculate VSID:
1439	 * This is the kernel vsid, we take the top for context from
1440	 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
1441	 * Here we know that (ea >> 60) == 0xc
1442	 */
1443	lis	r9,(MAX_USER_CONTEXT + 1)@ha
1444	addi	r9,r9,(MAX_USER_CONTEXT + 1)@l
1445
1446	srdi	r10,r11,SID_SHIFT
1447	rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
1448	ASM_VSID_SCRAMBLE(r10, r9, 256M)
1449	rldic	r9,r10,12,16	/* r9 = vsid << 12 */
1450
14510:
1452	/* Hash to the primary group */
1453	ld	r10,PACASTABVIRT(r13)
1454	srdi	r11,r11,SID_SHIFT
1455	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
1456
1457	/* Search the primary group for a free entry */
14581:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
1459	andi.	r11,r11,0x80
1460	beq	2f
1461	addi	r10,r10,16
1462	andi.	r11,r10,0x70
1463	bne	1b
1464
1465	/* Stick for only searching the primary group for now.		*/
1466	/* At least for now, we use a very simple random castout scheme */
1467	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
1468	mftb	r11
1469	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
1470	ori	r11,r11,0x10
1471
1472	/* r10 currently points to an ste one past the group of interest */
1473	/* make it point to the randomly selected entry			*/
1474	subi	r10,r10,128
1475	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
1476
1477	isync			/* mark the entry invalid		*/
1478	ld	r11,0(r10)
1479	rldicl	r11,r11,56,1	/* clear the valid bit */
1480	rotldi	r11,r11,8
1481	std	r11,0(r10)
1482	sync
1483
1484	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
1485	slbie	r11
1486
14872:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
1488	eieio
1489
1490	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
1491	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
1492	ori	r11,r11,0x90	/* Turn on valid and kp			*/
1493	std	r11,0(r10)	/* Put new entry back into the stab	*/
1494
1495	sync
1496
1497	/* All done -- return from exception. */
1498	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1499	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
1500
1501	andi.	r10,r12,MSR_RI
1502	beq-	unrecov_slb
1503
1504	mtcrf	0x80,r9			/* restore CR */
1505
1506	mfmsr	r10
1507	clrrdi	r10,r10,2
1508	mtmsrd	r10,1
1509
1510	mtspr	SPRN_SRR0,r11
1511	mtspr	SPRN_SRR1,r12
1512	ld	r9,PACA_EXSLB+EX_R9(r13)
1513	ld	r10,PACA_EXSLB+EX_R10(r13)
1514	ld	r11,PACA_EXSLB+EX_R11(r13)
1515	ld	r12,PACA_EXSLB+EX_R12(r13)
1516	ld	r13,PACA_EXSLB+EX_R13(r13)
1517	rfid
1518	b	.	/* prevent speculative execution */
1519