xref: /linux/arch/powerpc/kernel/exceptions-64s.S (revision 2b64b2ed277ff23e785fbdb65098ee7e1252d64f)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low-level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed-offset-specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position-dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. The LOAD_HANDLER / bctr sequence
 *   must be used instead.
 * - LOAD_HANDLER targets must be within the first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of the caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x3fff : Real mode trampolines
 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
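
/*
 * Illustrative sketch (not part of the build): the LOAD_HANDLER
 * constraint above comes from materialising the handler address with a
 * single 16-bit `ori` against the kernel base. A hedged C model of
 * that address math (hypothetical helper, assumed semantics):
 *
 *	// ori only supplies the low 16 bits, so the offset must
 *	// satisfy handler_offset < 0x10000 (first 64K).
 *	unsigned long load_handler_addr(unsigned long kbase,
 *					unsigned long handler_offset)
 *	{
 *		return kbase | (handler_offset & 0xffff);
 *	}
 */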
OPEN_FIXED_SECTION(real_vectors,        0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,    0x1900, 0x4000)
OPEN_FIXED_SECTION(virt_vectors,        0x4000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,    0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware.
 */
ZERO_FIXED_SECTION(fwnmi_page,          0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to the idle handler. This tests
	 * SRR1 bits 46:47. A non-0 value indicates that we are coming from a
	 * power saving state. The idle wakeup handler initially runs in real
	 * mode, but we branch to the 0xc000... address so we can turn on
	 * relocation with mtmsr.
	 */
#define IDLETEST(n)							\
	BEGIN_FTR_SECTION ;						\
	mfspr	r10,SPRN_SRR1 ;						\
	rlwinm.	r10,r10,47-31,30,31 ;					\
	beq-	1f ;							\
	cmpwi	cr3,r10,2 ;						\
	BRANCH_TO_C000(r10, system_reset_idle_common) ;			\
1:									\
	KVMTEST_PR(n) ;							\
	END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#else
#define IDLETEST NOTEST
#endif

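/*
 * Hedged C sketch of the IDLETEST wake-state check above (assumes IBM
 * bit numbering, where SRR1 bits 46:47 are value bits 17:16 from the
 * least-significant end):
 *
 *	int waking_from_powersave(unsigned long srr1)
 *	{
 *		unsigned int wake = (srr1 >> 16) & 3;	// SRR1[46:47]
 *		return wake != 0;	// non-zero: nap/sleep/winkle wakeup
 *	}
 *
 * The rlwinm./cmpwi pair performs the same extraction, and also leaves
 * a comparison of the field with 2 in cr3 for the wakeup code.
 */
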
EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
	SET_SCRATCH0(r13)
	/*
	 * MSR_RI is not enabled, because the PACA_EXNMI save area and the
	 * NMI stack are in use, so a nested NMI exception would corrupt them.
	 */
	EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
			      IDLETEST, 0x100)

EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)
TRAMP_KVM(PACA_EXNMI, 0x100)

#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
	mfspr	r12,SPRN_SRR1
	b	pnv_powersave_wakeup
#endif

/*
 * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
 * the right thing. We do not want to reconcile because that goes
 * through irq tracing which we don't want in NMI.
 *
 * Save PACAIRQHAPPENED because some code will do a hard disable
 * (e.g., xmon). So we want to restore this back to where it was
 * when we return. DAR is unused in the stack, so save it there.
 */
#define ADD_RECONCILE_NMI						\
	li	r10,IRQS_ALL_DISABLED;					\
	stb	r10,PACAIRQSOFTMASK(r13);				\
	lbz	r10,PACAIRQHAPPENED(r13);				\
	std	r10,_DAR(r1)

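/*
 * Hedged C model of ADD_RECONCILE_NMI (illustrative names, not the
 * real structure layout): the NMI path force-disables the soft mask
 * without irq tracing, and stashes the previous irq_happened state in
 * the otherwise-unused _DAR slot of the frame for restore on exit:
 *
 *	void nmi_reconcile(struct paca_struct *p, struct pt_regs *regs)
 *	{
 *		p->irq_soft_mask = IRQS_ALL_DISABLED;
 *		regs->dar = p->irq_happened;	// put back before rfid
 *	}
 */
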
EXC_COMMON_BEGIN(system_reset_common)
	/*
	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
	 * to recover, but a nested NMI will notice in_nmi and not recover
	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)
	li	r10,MSR_RI
	mtmsrd	r10,1

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	EXCEPTION_COMMON_NORET_STACK(PACA_EXNMI, 0x100,
			system_reset, system_reset_exception,
			ADD_NVGPRS;ADD_RECONCILE_NMI)

	/* This (and MCE) can be simplified with mtmsrd L=1 */
	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r0,MSR_RI
	mfmsr	r9
	andc	r9,r9,r0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	/*
	 * Restore soft mask settings.
	 */
	ld	r10,_DAR(r1)
	stb	r10,PACAIRQHAPPENED(r13)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

	/*
	 * Keep the code below in sync with MACHINE_CHECK_HANDLER_WINDUP.
	 * Should share common bits...
	 */

	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_SRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_SRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	mtcr	r11
	REST_GPR(11, r1)
	REST_2GPRS(12, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
	RFI_TO_USER_OR_KERNEL

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	SET_SCRATCH0(r13)		/* save r13 */
	/* See comment at system_reset exception */
	EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
			      NOTEST, 0x100)
#endif /* CONFIG_PPC_PSERIES */


EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector.
	 */
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
	b	machine_check_common_early
FTR_SECTION_ELSE
	b	machine_check_pSeries_0
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
TRAMP_REAL_BEGIN(machine_check_common_early)
	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
	/*
	 * Register contents:
	 * R13		= PACA
	 * R9		= CR
	 * Original R9 to R13 is saved on PACA_EXMC
	 *
	 * Switch to the mc_emergency stack and handle re-entrancy (we limit
	 * nested MCEs to a depth of 4 to avoid stack overflow).
	 * Save the MCE registers srr1, srr0, dar and dsisr and then set ME=1.
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * a nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set the stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * a stack frame on the mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	mr	r11,r1			/* Save r1 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in a nested machine check? */
	bne	0f			/* Yes, we are. */
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)
	/* Limit nested MCE to level 4 to avoid stack overflow */
	cmpwi	r10,MAX_MCE_DEPTH
	bgt	2f			/* Check if we hit limit of 4 */
	std	r11,GPR1(r1)		/* Save r1 on the stack. */
	std	r11,0(r1)		/* make stack chain pointer */
	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
	std	r11,_NIP(r1)
	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
	std	r11,_MSR(r1)
	mfspr	r11,SPRN_DAR		/* Save DAR */
	std	r11,_DAR(r1)
	mfspr	r11,SPRN_DSISR		/* Save DSISR */
	std	r11,_DSISR(r1)
	std	r9,_CCR(r1)		/* Save CR in stackframe */
	/* Save r9 through r13 from EXMC save area to stack frame. */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
	mfmsr	r11			/* get MSR value */
BEGIN_FTR_SECTION
	ori	r11,r11,MSR_ME		/* turn on ME bit */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ori	r11,r11,MSR_RI		/* turn on RI bit */
	LOAD_HANDLER(r12, machine_check_handle_early)
1:	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
2:
	/* Stack overflow. Stay on the emergency stack and panic.
	 * Keep the ME bit off while panicking, so that if we hit
	 * another machine check we checkstop.
	 */
	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
	ld	r11,PACAKMSR(r13)
	LOAD_HANDLER(r12, unrecover_mce)
	li	r10,MSR_ME
	andc	r11,r11,r10		/* Turn off MSR_ME */
	b	1b
	b	.	/* prevent speculative execution */

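/*
 * Hedged C sketch of the in_mce accounting above (helper names are
 * illustrative):
 *
 *	void mce_enter(struct paca_struct *p)
 *	{
 *		if (p->in_mce == 0)
 *			use_mc_emergency_stack();	// first entry
 *		p->in_mce++;				// track nesting
 *		if (p->in_mce > MAX_MCE_DEPTH)
 *			panic_with_me_clear();	// checkstop on next MCE
 *	}
 *
 * Every entry, nested or not, carves a fresh INT_FRAME_SIZE frame out
 * of the same emergency stack, which is why the depth must be bounded.
 */
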
TRAMP_REAL_BEGIN(machine_check_pSeries)
	.globl machine_check_fwnmi
machine_check_fwnmi:
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
	b	machine_check_common_early
END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
	/*
	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
	 * nested machine check corrupts it. machine_check_common enables
	 * MSR_RI.
	 */
	EXCEPTION_PROLOG_2_NORI(machine_check_common, EXC_STD)

TRAMP_KVM_SKIP(PACA_EXMC, 0x200)

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXMC+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXMC+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r3,PACA_EXMC+EX_DAR(r13)
	lwz	r4,PACA_EXMC+EX_DSISR(r13)
	/* Enable MSR_RI when finished with PACA_EXMC */
	li	r10,MSR_RI
	mtmsrd	r10,1
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	ret_from_except

#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r0,MSR_RI;				\
	mfmsr	r9;		/* get MSR value */	\
	andc	r9,r9,r0;				\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Move original SRR0 and SRR1 into the respective regs */	\
	ld	r9,_MSR(r1);				\
	mtspr	SPRN_SRR1,r9;				\
	ld	r3,_NIP(r1);				\
	mtspr	SPRN_SRR0,r3;				\
	ld	r9,_CTR(r1);				\
	mtctr	r9;					\
	ld	r9,_XER(r1);				\
	mtxer	r9;					\
	ld	r9,_LINK(r1);				\
	mtlr	r9;					\
	REST_GPR(0, r1);				\
	REST_8GPRS(2, r1);				\
	REST_GPR(10, r1);				\
	ld	r11,_CCR(r1);				\
	mtcr	r11;					\
	/* Decrement paca->in_mce. */			\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	REST_GPR(11, r1);				\
	REST_2GPRS(12, r1);				\
	/* restore original r1. */			\
	ld	r1,GPR1(r1)

#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. The low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * We have not used any non-volatile GPRs here, and as a rule
	 * most exception code including machine check does not.
	 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
	 * wakeup will restore volatile registers.
	 *
	 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	/* Turn off the RI bit because SRR1 is used by idle wakeup code. */
	/* Recoverability could be improved by reducing the use of SRR1. */
	li	r11,0
	mtmsrd	r11,1

	b	pnv_powersave_wakeup_mce
#endif
	/*
	 * Handle machine check early in real mode. We come here with
	 * ME=1, MMU (IR=0 and DR=0) off and using the MC emergency stack.
	 */
EXC_COMMON_BEGIN(machine_check_handle_early)
	std	r0,GPR0(r1)	/* Save r0 */
	EXCEPTION_PROLOG_COMMON_3(0x200)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)
BEGIN_FTR_SECTION
	b	4f
END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)

#ifdef	CONFIG_PPC_P7_NAP
	/*
	 * Check if the thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
	BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
	END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	/*
	 * Check if we are coming from hypervisor userspace. If yes then we
	 * continue in the host kernel in V mode to deliver the MC event.
	 */
	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
	beq	5f
4:	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	9f			/* continue in V mode if we are. */

5:
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
BEGIN_FTR_SECTION
	/*
	 * We are coming from kernel context. Check if we are coming from
	 * a guest. If yes, then we can continue. We will fall through
	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to the guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	9f			/* continue if we are. */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
	/*
	 * At this point we are not sure about what context we come from.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an unrecoverable exception.
	 * If yes, then stay on the emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	bne	2f
1:	mfspr	r11,SPRN_SRR0
	LOAD_HANDLER(r10,unrecover_mce)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	/*
	 * We are going down. But there is a chance that we might get hit
	 * by another MCE during the panic path, leaving us in an unstable
	 * state with no way out. Hence, turn the ME bit off while going
	 * down, so that if another MCE is hit during the panic path the
	 * system will checkstop and the hypervisor will get restarted
	 * cleanly by the SP.
	 */
	li	r3,MSR_ME
	andc	r10,r10,r3		/* Turn off MSR_ME */
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	b	.
2:
	/*
	 * Check if we have successfully handled/recovered from the error;
	 * if not, then stay on the emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */

	beq	1b		/* if !handled then panic */
BEGIN_FTR_SECTION
	/*
	 * Return from the MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_USER_OR_KERNEL
FTR_SECTION_ELSE
	/*
	 * pSeries: Return from the MC interrupt. Before that, stay on the
	 * emergency stack and call machine_check_exception to log the MCE
	 * event.
	 */
	LOAD_HANDLER(r10,mce_return)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	b	.
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
9:
	/* Deliver the machine check to the host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
	b	machine_check_pSeries_0

EXC_COMMON_BEGIN(unrecover_mce)
	/* Invoke machine_check_exception to print the MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	/*
	 * We will not reach here. Even if we did, there is no way out. Call
	 * unrecoverable_exception and die.
	 */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b

EXC_COMMON_BEGIN(mce_return)
	/* Invoke machine_check_exception to print the MCE event and return. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL
	b	.

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
SET_SCRATCH0(r13)		/* save r13 */
EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	tramp_real_data_access
EXC_REAL_END(data_access, 0x300, 0x80)

TRAMP_REAL_BEGIN(tramp_real_data_access)
EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, 0x300)
	/*
	 * DAR/DSISR must be read before setting MSR[RI], because
	 * a d-side MCE will clobber those registers, so this is not
	 * recoverable if they are live.
	 */
	mfspr	r10,SPRN_DAR
	mfspr	r11,SPRN_DSISR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	stw	r11,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_2(data_access_common, EXC_STD)

EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
SET_SCRATCH0(r13)		/* save r13 */
EXCEPTION_PROLOG_0(PACA_EXGEN)
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x300)
	mfspr	r10,SPRN_DAR
	mfspr	r11,SPRN_DSISR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	stw	r11,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_2_RELON(data_access_common, EXC_STD)
EXC_VIRT_END(data_access, 0x4300, 0x80)

TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)

EXC_COMMON_BEGIN(data_access_common)
	/*
	 * Here r13 points to the paca, r9 contains the saved CR,
	 * SRR0 and SRR1 are saved in r11 and r12,
	 * r9 - r13 are saved in paca->exgen.
	 * EX_DAR and EX_DSISR have the saved DAR/DSISR.
	 */
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)


EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
SET_SCRATCH0(r13)		/* save r13 */
EXCEPTION_PROLOG_0(PACA_EXSLB)
	b	tramp_real_data_access_slb
EXC_REAL_END(data_access_slb, 0x380, 0x80)

TRAMP_REAL_BEGIN(tramp_real_data_access_slb)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXSLB+EX_DAR(r13)
EXCEPTION_PROLOG_2(data_access_slb_common, EXC_STD)

EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
SET_SCRATCH0(r13)		/* save r13 */
EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXSLB+EX_DAR(r13)
EXCEPTION_PROLOG_2_RELON(data_access_slb_common, EXC_STD)
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)

TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)

EXC_COMMON_BEGIN(data_access_slb_common)
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
	ld	r4,PACA_EXSLB+EX_DAR(r13)
	std	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_exception_return
1:	/* Error case */
	std	r3,RESULT(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r4,_DAR(r1)
	ld	r5,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	ret_from_except


EXC_REAL(instruction_access, 0x400, 0x80)
EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
TRAMP_KVM(PACA_EXGEN, 0x400)

EXC_COMMON_BEGIN(instruction_access_common)
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,DSISR_SRR1_MATCH_64S@h
	li	r5,0x400
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)


EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
EXCEPTION_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, KVMTEST_PR, 0x480);
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)

EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
EXCEPTION_RELON_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, NOTEST, 0x480);
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)

TRAMP_KVM(PACA_EXSLB, 0x480)

EXC_COMMON_BEGIN(instruction_access_slb_common)
	EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
	ld	r4,_NIP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_exception_return
1:	/* Error case */
	std	r3,RESULT(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r4,_NIP(r1)
	ld	r5,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	ret_from_except


EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	.globl hardware_interrupt_hv;
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		MASKABLE_EXCEPTION_HV(0x500, hardware_interrupt_common, IRQS_DISABLED)
	FTR_SECTION_ELSE
		MASKABLE_EXCEPTION(0x500, hardware_interrupt_common, IRQS_DISABLED)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)

EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		MASKABLE_RELON_EXCEPTION_HV(0x500, hardware_interrupt_common,
					    IRQS_DISABLED)
	FTR_SECTION_ELSE
		__MASKABLE_RELON_EXCEPTION(0x500, hardware_interrupt_common,
					   EXC_STD, SOFTEN_TEST_PR, IRQS_DISABLED)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)

TRAMP_KVM(PACA_EXGEN, 0x500)
TRAMP_KVM_HV(PACA_EXGEN, 0x500)
EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)


EXC_REAL_BEGIN(alignment, 0x600, 0x100)
SET_SCRATCH0(r13)		/* save r13 */
EXCEPTION_PROLOG_0(PACA_EXGEN)
EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, 0x600)
	mfspr	r10,SPRN_DAR
	mfspr	r11,SPRN_DSISR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	stw	r11,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_2(alignment_common, EXC_STD)
EXC_REAL_END(alignment, 0x600, 0x100)

EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
SET_SCRATCH0(r13)		/* save r13 */
EXCEPTION_PROLOG_0(PACA_EXGEN)
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x600)
	mfspr	r10,SPRN_DAR
	mfspr	r11,SPRN_DSISR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	stw	r11,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_2_RELON(alignment_common, EXC_STD)
EXC_VIRT_END(alignment, 0x4600, 0x100)

TRAMP_KVM(PACA_EXGEN, 0x600)
EXC_COMMON_BEGIN(alignment_common)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	b	ret_from_except


EXC_REAL(program_check, 0x700, 0x100)
EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
TRAMP_KVM(PACA_EXGEN, 0x700)
EXC_COMMON_BEGIN(program_check_common)
	/*
	 * It's possible to receive a TM Bad Thing type program check with
	 * userspace register values (in particular r1), but with SRR1 reporting
	 * that we came from the kernel. Normally that would confuse the bad
	 * stack logic, and we would report a bad kernel stack pointer. Instead
	 * we switch to the emergency stack if we're taking a TM Bad Thing from
	 * the kernel.
	 */
	li	r10,MSR_PR		/* Build a mask of MSR_PR ..	*/
	oris	r10,r10,0x200000@h	/* .. and SRR1_PROGTM		*/
	and	r10,r10,r12		/* Mask SRR1 with that.		*/
	srdi	r10,r10,8		/* Shift it so we can compare	*/
	cmpldi	r10,(0x200000 >> 8)	/* .. with an immediate.	*/
	bne 1f				/* If != go to normal path.	*/

	/* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack	*/
	andi.	r10,r12,MSR_PR;		/* Set CR0 correctly for label	*/
					/* 3 in EXCEPTION_PROLOG_COMMON	*/
	mr	r10,r1			/* Save r1			*/
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	b 3f				/* Jump into the macro !!	*/
1:	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	b	ret_from_except

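/*
 * The SRR1 test above as a hedged C sketch: the emergency stack is
 * taken only for a TM Bad Thing (SRR1_PROGTM, 0x200000) raised from
 * the kernel (MSR_PR clear). The srdi/cmpldi pair in the asm exists
 * only because cmpldi takes a 16-bit immediate:
 *
 *	int need_emergency_stack(unsigned long srr1)
 *	{
 *		unsigned long m = srr1 & (MSR_PR | 0x200000);
 *		return m == 0x200000;	// PROGTM set, PR clear
 *	}
 */
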

EXC_REAL(fp_unavailable, 0x800, 0x100)
EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
TRAMP_KVM(PACA_EXGEN, 0x800)
EXC_COMMON_BEGIN(fp_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero. If non-zero (i.e. userspace
	 * was in a transaction), go do TM stuff.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	ret_from_except
#endif


EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0x900)
EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)


EXC_REAL_HV(hdecrementer, 0x980, 0x80)
EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980)
TRAMP_KVM_HV(PACA_EXGEN, 0x980)
EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)


EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100, IRQS_DISABLED)
EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00, IRQS_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0xa00)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
#else
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
#endif


EXC_REAL(trap_0b, 0xb00, 0x100)
EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)

/*
 * system call / hypercall (0xc00, 0x4c00)
 *
 * The system call exception is invoked with "sc 0" and does not alter the
 * HV bit. There is support for kernel code to invoke system calls but there
 * are no in-tree users.
 *
 * The hypercall is invoked with "sc 1" and sets HV=1.
 *
 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
 * 0x4c00 virtual mode.
 *
 * Call convention:
 *
 * The syscall register convention is in Documentation/powerpc/syscall64-abi.txt
 *
 * For hypercalls, the register convention is as follows:
 * r0 volatile
 * r1-2 nonvolatile
 * r3 volatile parameter and return value for status
 * r4-r10 volatile input and output value
 * r11 volatile hypercall number and output value
 * r12 volatile input and output value
 * r13-r31 nonvolatile
 * LR nonvolatile
 * CTR volatile
 * XER volatile
 * CR0-1 CR5-7 volatile
 * CR2-4 nonvolatile
 * Other registers nonvolatile
 *
 * The intersection of volatile registers that don't contain possible
 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 * without saving, though xer is a poor choice to use, as hardware may
 * interpret some of its bits, making them costly to change.
 */
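
/*
 * For illustration only (not kernel code), a hedged userspace view of
 * the "sc 0" convention referenced above: r0 carries the syscall
 * number, r3-r8 the arguments, r3 the return value, and cr0.SO flags
 * an error. Per the ABI, r0, r3-r12, cr0, ctr and xer are clobbered:
 *
 *	long sc0_2args(long nr, long a0, long a1)
 *	{
 *		register long r0 asm("r0") = nr;
 *		register long r3 asm("r3") = a0;
 *		register long r4 asm("r4") = a1;
 *		asm volatile("sc"
 *			     : "+r" (r3), "+r" (r4)
 *			     : "r" (r0)
 *			     : "r5", "r6", "r7", "r8", "r9", "r10",
 *			       "r11", "r12", "cr0", "ctr", "xer", "memory");
 *		return r3;	// caller should also check cr0.SO
 *	}
 */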
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * There is a little bit of juggling to get syscall and hcall
	 * working well. Save r13 in ctr to avoid using a SPRG scratch
	 * register.
	 *
	 * Userspace syscalls have already saved the PPR, hcalls must save
	 * it before setting HMT_MEDIUM.
	 */
#define SYSCALL_KVMTEST							\
	mtctr	r13;							\
	GET_PACA(r13);							\
	std	r10,PACA_EXGEN+EX_R10(r13);				\
	INTERRUPT_TO_KERNEL;						\
	KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
	HMT_MEDIUM;							\
	mfctr	r9;

#else
#define SYSCALL_KVMTEST							\
	HMT_MEDIUM;							\
	mr	r9,r13;							\
	GET_PACA(r13);							\
	INTERRUPT_TO_KERNEL;
#endif

#define LOAD_SYSCALL_HANDLER(reg)					\
	__LOAD_HANDLER(reg, system_call_common)

/*
 * After SYSCALL_KVMTEST, we reach here with PACA in r13, r13 in r9,
 * and HMT_MEDIUM.
 */
#define SYSCALL_REAL						\
	mfspr	r11,SPRN_SRR0 ;					\
	mfspr	r12,SPRN_SRR1 ;					\
	LOAD_SYSCALL_HANDLER(r10) ; 				\
	mtspr	SPRN_SRR0,r10 ; 				\
	ld	r10,PACAKMSR(r13) ;				\
	mtspr	SPRN_SRR1,r10 ; 				\
	RFI_TO_KERNEL ;						\
	b	. ;	/* prevent speculative execution */

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
#define SYSCALL_FASTENDIAN_TEST					\
BEGIN_FTR_SECTION						\
	cmpdi	r0,0x1ebe ; 					\
	beq-	1f ;						\
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\

#define SYSCALL_FASTENDIAN					\
	/* Fast LE/BE switch system call */			\
1:	mfspr	r12,SPRN_SRR1 ;					\
	xori	r12,r12,MSR_LE ;				\
	mtspr	SPRN_SRR1,r12 ;					\
	mr	r13,r9 ;					\
	RFI_TO_USER ;	/* return to userspace */		\
	b	. ;	/* prevent speculative execution */
#else
#define SYSCALL_FASTENDIAN_TEST
#define SYSCALL_FASTENDIAN
#endif /* CONFIG_PPC_FAST_ENDIAN_SWITCH */

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly so we do it via the CTR, which
	 * is volatile across system calls.
	 */
#define SYSCALL_VIRT						\
	LOAD_SYSCALL_HANDLER(r10) ;				\
	mtctr	r10 ;						\
	mfspr	r11,SPRN_SRR0 ;					\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd	r10,1 ;						\
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_VIRT						\
	mfspr	r11,SPRN_SRR0 ;					\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd	r10,1 ;			/* Set RI (EE=0) */	\
	b	system_call_common ;
#endif

EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
	SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
	SYSCALL_FASTENDIAN_TEST
	SYSCALL_REAL
	SYSCALL_FASTENDIAN
EXC_REAL_END(system_call, 0xc00, 0x100)

EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
	SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
	SYSCALL_FASTENDIAN_TEST
	SYSCALL_VIRT
	SYSCALL_FASTENDIAN
EXC_VIRT_END(system_call, 0x4c00, 0x100)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * This is an hcall, so the register convention is as above, with
	 * these differences:
	 * r13 = PACA
	 * ctr = orig r13
	 * orig r10 saved in PACA
	 */
TRAMP_KVM_BEGIN(do_kvm_0xc00)
	 /*
	  * Save the PPR (on systems that support it) before changing to
	  * HMT_MEDIUM. That allows the KVM code to save that value into the
	  * guest state (it is the guest's PPR value).
	  */
	OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
	mfctr	r10
	SET_SCRATCH0(r10)
	std	r9,PACA_EXGEN+EX_R9(r13)
	mfcr	r9
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
#endif


EXC_REAL(single_step, 0xd00, 0x100)
EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00)
TRAMP_KVM(PACA_EXGEN, 0xd00)
EXC_COMMON(single_step_common, 0xd00, single_step_exception)

EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20)
EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
EXC_COMMON_BEGIN(h_data_storage_common)
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	lwz	r5,PACA_EXGEN+EX_DSISR(r13)
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	li	r5,SIGSEGV
	bl	bad_page_fault
MMU_FTR_SECTION_ELSE
	bl	unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
	b	ret_from_except


EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)


EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20)
EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40)
TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)


/*
 * The hmi_exception trampoline is a special case. It jumps to
 * hmi_exception_early first, and then eventually from there to the
 * trampoline to get into virtual mode.
 */
__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early)
__TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60, IRQS_DISABLED)
EXC_VIRT_NONE(0x4e60, 0x20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
TRAMP_REAL_BEGIN(hmi_exception_early)
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack for realmode */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
	EXCEPTION_PROLOG_COMMON_1()
	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
	EXCEPTION_PROLOG_COMMON_3(0xe60)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */
	cmpdi	cr0,r3,0

	/* Windup the stack. */
	/* Move original HSRR0 and HSRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_HSRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_HSRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	REST_2GPRS(12, r1)
	bne	1f
	mtcr	r11
	REST_GPR(11, r1)
	ld	r1,GPR1(r1)
	HRFI_TO_USER_OR_KERNEL

1:	mtcr	r11
	REST_GPR(11, r1)
	ld	r1,GPR1(r1)

	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	.globl hmi_exception_after_realmode
hmi_exception_after_realmode:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	tramp_real_hmi_exception

EXC_COMMON_BEGIN(hmi_exception_common)
EXCEPTION_COMMON(PACA_EXGEN, 0xe60, hmi_exception_common, handle_hmi_exception,
        ret_from_except, FINISH_NAP;ADD_NVGPRS;ADD_RECONCILE;RUNLATCH_ON)

EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
#else
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
#endif


EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20, IRQS_DISABLED)
EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0, IRQS_DISABLED)
TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)


EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)


EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQS_PMI_DISABLED)
EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQS_PMI_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0xf00)
EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)


EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
TRAMP_KVM(PACA_EXGEN, 0xf20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero. If non-zero (i.e. userspace
	 * was in a transaction), go do TM stuff.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except


EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
TRAMP_KVM(PACA_EXGEN, 0xf40)
EXC_COMMON_BEGIN(vsx_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero. If non-zero (i.e. userspace
	 * was in a transaction), go do TM stuff.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	ret_from_except


EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60)
TRAMP_KVM(PACA_EXGEN, 0xf60)
EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)


EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80)
TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)


EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)

#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif


EXC_REAL(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)

EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)

EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	bne+	denorm_assist
#endif

	KVMTEST_HV(0x1500)
	EXCEPTION_PROLOG_2(denorm_common, EXC_HV)
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)

#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
	b	exc_real_0x1500_denorm_exception_hv
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif

TRAMP_KVM_HV(PACA_EXGEN, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers.
 */
	XVCPSGNDP32(32)
denorm_done:
	mfspr	r11,SPRN_HSRR0
	subi	r11,r11,4
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFI_TO_UNKNOWN
	b	.
#endif

EXC_COMMON(denorm_common, 0x1500, unknown_exception)


#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif


EXC_REAL(altivec_assist, 0x1700, 0x100)
EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700)
TRAMP_KVM(PACA_EXGEN, 0x1700)
#ifdef CONFIG_ALTIVEC
EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
#else
EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
#endif


#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif

#ifdef CONFIG_PPC_WATCHDOG

#define MASKED_DEC_HANDLER_LABEL 3f

#define MASKED_DEC_HANDLER(_H)				\
3: /* soft-nmi */					\
	std	r12,PACA_EXGEN+EX_R12(r13);		\
	GET_SCRATCH0(r10);				\
	std	r10,PACA_EXGEN+EX_R13(r13);		\
	EXCEPTION_PROLOG_2(soft_nmi_common, _H)

/*
 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
 * stack is one that is usable by maskable interrupts so long as MSR_EE
 * remains off. It is used for recovery when something has corrupted the
 * normal kernel stack, for example. The "soft NMI" must not use the process
 * stack because we want irq disabled sections to avoid touching the stack
 * at all (other than PMU interrupts), so use the emergency stack for this,
 * and run it entirely with interrupts hard disabled.
 */
EXC_COMMON_BEGIN(soft_nmi_common)
	mr	r10,r1
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
			system_reset, soft_nmi_interrupt,
			ADD_NVGPRS;ADD_RECONCILE)
	b	ret_from_except

#else /* CONFIG_PPC_WATCHDOG */
#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
#define MASKED_DEC_HANDLER(_H)
#endif /* CONFIG_PPC_WATCHDOG */

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
 * This is called with r10 containing the value to OR into the paca field.
 */
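/*
 * Hedged C restatement of that bookkeeping (illustrative helper names;
 * the real macro below must run without touching the kernel stack):
 *
 *	void masked_interrupt(struct paca_struct *p, unsigned long reason)
 *	{
 *		p->irq_happened |= reason;
 *		if (reason == PACA_IRQ_DEC) {
 *			set_dec(0x7fffffff);	// push DEC far out
 *		} else if (reason & PACA_IRQ_MUST_HARD_MASK) {
 *			clear_ee_in_srr1();	// return with EE off
 *			p->irq_happened |= PACA_IRQ_HARD_DIS;
 *		}
 *		// doorbell/HMI: nothing more to do, they won't refire
 *	}
 */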
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	cmpwi	r10,PACA_IRQ_DEC;			\
	bne	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	MASKED_DEC_HANDLER_LABEL;		\
1:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK;	\
	beq	2f;					\
	mfspr	r10,SPRN_##_H##SRR1;			\
	xori	r10,r10,MSR_EE; /* clear MSR_EE */	\
	mtspr	SPRN_##_H##SRR1,r10;			\
	ori	r11,r11,PACA_IRQ_HARD_DIS;		\
	stb	r11,PACAIRQHAPPENED(r13);		\
2:	/* done */					\
	mtcrf	0x80,r9;				\
	std	r1,PACAR1(r13);				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	/* returns to kernel where r13 must be set up, so don't restore it */ \
	##_H##RFI_TO_KERNEL;				\
	b	.;					\
	MASKED_DEC_HANDLER(_H)

TRAMP_REAL_BEGIN(stf_barrier_fallback)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	sync
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ori	31,31,0
	.rept 14
	b	1f
1:
	.endr
	blr

TRAMP_REAL_BEGIN(rfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
	 */
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b

	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	rfid

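/*
 * Hedged C model of the fallback displacement flush above: walk the
 * dedicated fallback area loading one word per 128-byte line, 8 lines
 * per loop iteration, with each successive line read 8 bytes deeper
 * (offset j*136 = j*128 + j*8 -- the staggered offsets noted above):
 *
 *	void l1d_fallback_flush(const char *area, unsigned long size)
 *	{
 *		unsigned long i, j;
 *		for (i = 0; i < size / (128 * 8); i++)
 *			for (j = 0; j < 8; j++)	// unrolled in the asm
 *				(void)*(volatile unsigned long *)
 *					(area + i * 128 * 8 + j * 136);
 *	}
 */
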
TRAMP_REAL_BEGIN(hrfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
	 */
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b

	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	hrfid

/*
 * Real mode exceptions actually use this too, but alternate
 * instruction code patches (which end up in the common .text area)
 * cannot reach these if they are put there.
 */
USE_FIXED_SECTION(virt_trampolines)
	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	RFI_TO_KERNEL
	b	.

TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	HRFI_TO_KERNEL
	b	.
#endif

/*
 * Ensure that any handlers that get invoked from the exception prologs
 * above are below the first 64KB (0x10000) of the kernel image because
 * the prologs assemble the addresses of these handlers using the
 * LOAD_HANDLER macro, which uses an ori instruction.
 */

/*** Common interrupt handlers ***/


	/*
	 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * it.  Addresses are the same as the original interrupt addresses, but
	 * offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest; anything that escalated
	 * to HV=1 from HV=0 is delivered via real mode handlers.
	 */

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */

EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
	b	__ppc64_runlatch_on

USE_FIXED_SECTION(virt_trampolines)
	/*
	 * The __end_interrupts marker must be past the out-of-line (OOL)
	 * handlers, so that they are copied to real address 0x100 when running
	 * a relocatable kernel. This ensures they can be reached from the short
	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
	 * directly, without using LOAD_HANDLER().
	 */
	.align	7
	.globl	__end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)

#ifdef CONFIG_PPC_970_NAP
EXC_COMMON_BEGIN(power4_fixup_nap)
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
CLOSE_FIXED_SECTION(virt_trampolines);

USE_TEXT_SECTION()

/*
 * Hash table stuff
 */
	.balign	IFETCH_ALIGN_BYTES
do_hash_page:
#ifdef CONFIG_PPC_BOOK3S_64
	lis	r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
	ori	r0,r0,DSISR_BAD_FAULT_64S@l
	and.	r0,r4,r0		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	ld	r11, PACA_THREAD_INFO(r13)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * r3 contains the faulting address
	 * r4 msr
	 * r5 contains the trap number
	 * r6 contains dsisr
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	mr	r4,r12
	ld	r6,_DSISR(r1)
	bl	__hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if __hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f

	/* Reload DSISR into r4 for the DABR check below */
	ld	r4,_DSISR(r1)
#endif /* CONFIG_PPC_BOOK3S_64 */

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpdi	r3,0
	beq+	12f
	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_break
12:	b	ret_from_except_lite


#ifdef CONFIG_PPC_BOOK3S_64
/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion.
 */
13:	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	low_hash_fault
	b	ret_from_except
#endif

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	bad_page_fault
	b	ret_from_except

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b
_ASM_NOKPROBE_SYMBOL(bad_stack);

/*
 * When a doorbell is triggered from system reset wakeup, the message is
 * not cleared, so it would fire again when EE is enabled.
 *
 * When coming from local_irq_enable, there may be the same problem if
 * we were hard disabled.
 *
 * Execute msgclr to clear the pending message before handling the
 * interrupt.
 */
h_doorbell_common_msgclr:
	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
	PPC_MSGCLR(3)
	b	h_doorbell_common

doorbell_super_common_msgclr:
	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
	PPC_MSGCLRP(3)
	b	doorbell_super_common

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 * which kind of interrupt. MSR:EE is already off. We generate a
 * stackframe as if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable interrupts.
 *
 * Note that we don't specify LR as the NIP (return address) for
 * the interrupt because that would unbalance the return branch
 * predictor.
 */
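/*
 * Hedged sketch of the caller's side (illustrative, loosely following
 * arch_local_irq_enable): when soft-enabling finds a pending interrupt
 * recorded in paca->irq_happened, it picks the corresponding vector
 * number and replays it:
 *
 *	void replay_pending(struct paca_struct *p)
 *	{
 *		if (p->irq_happened & PACA_IRQ_DEC)
 *			__replay_interrupt(0x900);
 *		else if (p->irq_happened & PACA_IRQ_EE)
 *			__replay_interrupt(0x500);
 *		// 0xa00/0xe60/0xe80/0xf00 are handled similarly
 *	}
 */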
_GLOBAL(__replay_interrupt)
	/*
	 * We are going to jump to the exception common code, which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	LOAD_REG_ADDR(r11, replay_interrupt_return)
	mfcr	r9
	ori	r12,r12,MSR_EE
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
BEGIN_FTR_SECTION
	beq	h_virt_irq_common
FTR_SECTION_ELSE
	beq	hardware_interrupt_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
	cmpwi	r3,0xf00
	beq	performance_monitor_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xa00
	beq	h_doorbell_common_msgclr
	cmpwi	r3,0xe60
	beq	hmi_exception_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common_msgclr
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
replay_interrupt_return:
	blr

_ASM_NOKPROBE_SYMBOL(__replay_interrupt)
1899