1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9 * GNU General Public License for more details.
10 *
11 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
12 *
13 * Derived from book3s_rmhandlers.S and other files, which are:
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <asm/ppc_asm.h>
21#include <asm/kvm_asm.h>
22#include <asm/reg.h>
23#include <asm/mmu.h>
24#include <asm/page.h>
25#include <asm/ptrace.h>
26#include <asm/hvcall.h>
27#include <asm/asm-offsets.h>
28#include <asm/exception-64s.h>
29#include <asm/kvm_book3s_asm.h>
30#include <asm/mmu-hash64.h>
31#include <asm/tm.h>
32
33#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
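/*
 * Illustrative note (not in the original source): VCPU_GPRS_TM(n) is the
 * byte offset of checkpointed GPR n within the vcpu struct, i.e.
 * VCPU_GPR_TM plus n * ULONG_SIZE.  For example, VCPU_GPRS_TM(31)
 * addresses the checkpointed r31 saved/restored by the TM code below.
 */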
34
35/* Values in HSTATE_NAPPING(r13) */
36#define NAPPING_CEDE	1
37#define NAPPING_NOVCPU	2
38
39/*
40 * Call kvmppc_hv_entry in real mode.
41 * Must be called with interrupts hard-disabled.
42 *
43 * Input Registers:
44 *
45 * LR = return address to continue at after eventually re-enabling MMU
46 */
47_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
48	mflr	r0
49	std	r0, PPC_LR_STKOFF(r1)
50	stdu	r1, -112(r1)
51	mfmsr	r10
52	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
53	li	r0,MSR_RI
54	andc	r0,r10,r0
55	li	r6,MSR_IR | MSR_DR
56	andc	r6,r10,r6
57	mtmsrd	r0,1		/* clear RI in MSR */
58	mtsrr0	r5
59	mtsrr1	r6
60	RFI
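	/*
	 * Added sketch (not from the original source): the trampoline above
	 * is roughly
	 *	srr1 = mfmsr() & ~(MSR_IR | MSR_DR);	(host MSR, relocation off)
	 *	srr0 = &kvmppc_call_hv_entry;
	 *	rfid;					(drop into real mode)
	 * MSR_RI is cleared before SRR0/SRR1 are loaded so that an interrupt
	 * taken while they hold these transient values is treated as
	 * unrecoverable rather than silently corrupting state.
	 */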
61
62kvmppc_call_hv_entry:
63	ld	r4, HSTATE_KVM_VCPU(r13)
64	bl	kvmppc_hv_entry
65
66	/* Back from guest - restore host state and return to caller */
67
68BEGIN_FTR_SECTION
69	/* Restore host DABR and DABRX */
70	ld	r5,HSTATE_DABR(r13)
71	li	r6,7
72	mtspr	SPRN_DABR,r5
73	mtspr	SPRN_DABRX,r6
74END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
75
76	/* Restore SPRG3 */
77	ld	r3,PACA_SPRG_VDSO(r13)
78	mtspr	SPRN_SPRG_VDSO_WRITE,r3
79
80	/* Reload the host's PMU registers */
81	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
82	lbz	r4, LPPACA_PMCINUSE(r3)
83	cmpwi	r4, 0
84	beq	23f			/* skip if not */
85BEGIN_FTR_SECTION
86	ld	r3, HSTATE_MMCR0(r13)
87	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
88	cmpwi	r4, MMCR0_PMAO
89	beql	kvmppc_fix_pmao
90END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
91	lwz	r3, HSTATE_PMC1(r13)
92	lwz	r4, HSTATE_PMC2(r13)
93	lwz	r5, HSTATE_PMC3(r13)
94	lwz	r6, HSTATE_PMC4(r13)
95	lwz	r8, HSTATE_PMC5(r13)
96	lwz	r9, HSTATE_PMC6(r13)
97	mtspr	SPRN_PMC1, r3
98	mtspr	SPRN_PMC2, r4
99	mtspr	SPRN_PMC3, r5
100	mtspr	SPRN_PMC4, r6
101	mtspr	SPRN_PMC5, r8
102	mtspr	SPRN_PMC6, r9
103	ld	r3, HSTATE_MMCR0(r13)
104	ld	r4, HSTATE_MMCR1(r13)
105	ld	r5, HSTATE_MMCRA(r13)
106	ld	r6, HSTATE_SIAR(r13)
107	ld	r7, HSTATE_SDAR(r13)
108	mtspr	SPRN_MMCR1, r4
109	mtspr	SPRN_MMCRA, r5
110	mtspr	SPRN_SIAR, r6
111	mtspr	SPRN_SDAR, r7
112BEGIN_FTR_SECTION
113	ld	r8, HSTATE_MMCR2(r13)
114	ld	r9, HSTATE_SIER(r13)
115	mtspr	SPRN_MMCR2, r8
116	mtspr	SPRN_SIER, r9
117END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
118	mtspr	SPRN_MMCR0, r3
119	isync
12023:
121
122	/*
123	 * Reload DEC.  HDEC interrupts were disabled when
124	 * we reloaded the host's LPCR value.
125	 */
126	ld	r3, HSTATE_DECEXP(r13)
127	mftb	r4
128	subf	r4, r4, r3
129	mtspr	SPRN_DEC, r4
130
131	/*
132	 * For external and machine check interrupts, we need
133	 * to call the Linux handler to process the interrupt.
134	 * We do that by jumping to absolute address 0x500 for
135	 * external interrupts, or the machine_check_fwnmi label
136	 * for machine checks (since firmware might have patched
137	 * the vector area at 0x200).  The [h]rfid at the end of the
138	 * handler will return to the book3s_hv_interrupts.S code.
139	 * For other interrupts we do the rfid to get back
140	 * to the book3s_hv_interrupts.S code here.
141	 */
142	ld	r8, 112+PPC_LR_STKOFF(r1)
143	addi	r1, r1, 112
144	ld	r7, HSTATE_HOST_MSR(r13)
145
146	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
147	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
148	beq	11f
149	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
150	beq	cr2, 14f			/* HMI check */
151
152	/* RFI into the highmem handler, or branch to interrupt handler */
153	mfmsr	r6
154	li	r0, MSR_RI
155	andc	r6, r6, r0
156	mtmsrd	r6, 1			/* Clear RI in MSR */
157	mtsrr0	r8
158	mtsrr1	r7
159	beq	cr1, 13f		/* machine check */
160	RFI
161
162	/* On POWER7, we have external interrupts set to use HSRR0/1 */
16311:	mtspr	SPRN_HSRR0, r8
164	mtspr	SPRN_HSRR1, r7
165	ba	0x500
166
16713:	b	machine_check_fwnmi
168
16914:	mtspr	SPRN_HSRR0, r8
170	mtspr	SPRN_HSRR1, r7
171	b	hmi_exception_after_realmode
172
173kvmppc_primary_no_guest:
174	/* We handle this much like a ceded vcpu */
175	/* set our bit in napping_threads */
176	ld	r5, HSTATE_KVM_VCORE(r13)
177	lbz	r7, HSTATE_PTID(r13)
178	li	r0, 1
179	sld	r0, r0, r7
180	addi	r6, r5, VCORE_NAPPING_THREADS
1811:	lwarx	r3, 0, r6
182	or	r3, r3, r0
183	stwcx.	r3, 0, r6
184	bne	1b
185	/* order napping_threads update vs testing entry_exit_count */
186	isync
187	li	r12, 0
188	lwz	r7, VCORE_ENTRY_EXIT(r5)
189	cmpwi	r7, 0x100
190	bge	kvm_novcpu_exit	/* another thread already exiting */
191	li	r3, NAPPING_NOVCPU
192	stb	r3, HSTATE_NAPPING(r13)
193
194	b	kvm_do_nap
195
196kvm_novcpu_wakeup:
197	ld	r1, HSTATE_HOST_R1(r13)
198	ld	r5, HSTATE_KVM_VCORE(r13)
199	li	r0, 0
200	stb	r0, HSTATE_NAPPING(r13)
201	stb	r0, HSTATE_HWTHREAD_REQ(r13)
202
203	/* check the wake reason */
204	bl	kvmppc_check_wake_reason
205
206	/* see if any other thread is already exiting */
207	lwz	r0, VCORE_ENTRY_EXIT(r5)
208	cmpwi	r0, 0x100
209	bge	kvm_novcpu_exit
210
211	/* clear our bit in napping_threads */
212	lbz	r7, HSTATE_PTID(r13)
213	li	r0, 1
214	sld	r0, r0, r7
215	addi	r6, r5, VCORE_NAPPING_THREADS
2164:	lwarx	r7, 0, r6
217	andc	r7, r7, r0
218	stwcx.	r7, 0, r6
219	bne	4b
220
221	/* See if the wake reason means we need to exit */
222	cmpdi	r3, 0
223	bge	kvm_novcpu_exit
224
225	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
226	ld	r4, HSTATE_KVM_VCPU(r13)
227	cmpdi	r4, 0
228	bne	kvmppc_got_guest
229
230kvm_novcpu_exit:
231	b	hdec_soon
232
233/*
234 * We come in here when wakened from nap mode.
235 * Relocation is off and most register values are lost.
236 * r13 points to the PACA.
237 */
238	.globl	kvm_start_guest
239kvm_start_guest:
240
241	/* Set runlatch bit the minute you wake up from nap */
242	mfspr	r1, SPRN_CTRLF
243	ori 	r1, r1, 1
244	mtspr	SPRN_CTRLT, r1
245
246	ld	r2,PACATOC(r13)
247
248	li	r0,KVM_HWTHREAD_IN_KVM
249	stb	r0,HSTATE_HWTHREAD_STATE(r13)
250
251	/* NV GPR values from power7_idle() will no longer be valid */
252	li	r0,1
253	stb	r0,PACA_NAPSTATELOST(r13)
254
255	/* were we napping due to cede? */
256	lbz	r0,HSTATE_NAPPING(r13)
257	cmpwi	r0,NAPPING_CEDE
258	beq	kvm_end_cede
259	cmpwi	r0,NAPPING_NOVCPU
260	beq	kvm_novcpu_wakeup
261
262	ld	r1,PACAEMERGSP(r13)
263	subi	r1,r1,STACK_FRAME_OVERHEAD
264
265	/*
266	 * We weren't napping due to cede, so this must be a secondary
267	 * thread being woken up to run a guest, or being woken up due
268	 * to a stray IPI.  (Or due to some machine check or hypervisor
269	 * maintenance interrupt while the core is in KVM.)
270	 */
271
272	/* Check the wake reason in SRR1 to see why we got here */
273	bl	kvmppc_check_wake_reason
274	cmpdi	r3, 0
275	bge	kvm_no_guest
276
277	/* get vcpu pointer, NULL if we have no vcpu to run */
278	ld	r4,HSTATE_KVM_VCPU(r13)
279	cmpdi	r4,0
280	/* if we have no vcpu to run, go back to sleep */
281	beq	kvm_no_guest
282
283kvm_secondary_got_guest:
284
285	/* Set HSTATE_DSCR(r13) to something sensible */
286	ld	r6, PACA_DSCR(r13)
287	std	r6, HSTATE_DSCR(r13)
288
289	bl	kvmppc_hv_entry
290
291	/* Back from the guest, go back to nap */
292	/* Clear our vcpu pointer so we don't come back in early */
293	li	r0, 0
294	std	r0, HSTATE_KVM_VCPU(r13)
295	/*
296	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
297	 * the nap_count, because once the increment to nap_count is
298	 * visible we could be given another vcpu.
299	 */
300	lwsync
301
302	/* increment the nap count and then go to nap mode */
303	ld	r4, HSTATE_KVM_VCORE(r13)
304	addi	r4, r4, VCORE_NAP_COUNT
30551:	lwarx	r3, 0, r4
306	addi	r3, r3, 1
307	stwcx.	r3, 0, r4
308	bne	51b
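	/*
	 * Added note: the store, lwsync and loop above are roughly
	 *	hstate->kvm_vcpu = NULL;
	 *	smp_wmb();              (the lwsync)
	 *	vc->nap_count++;        (atomic, via lwarx/stwcx.)
	 * where the field names are the C-side equivalents of the asm offsets
	 * used here.
	 */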
309
310/*
311 * At this point we have finished executing in the guest.
312 * We need to wait for hwthread_req to become zero, since
313 * we may not turn on the MMU while hwthread_req is non-zero.
314 * While waiting we also need to check if we get given a vcpu to run.
315 */
316kvm_no_guest:
317	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
318	cmpwi	r3, 0
319	bne	53f
320	HMT_MEDIUM
321	li	r0, KVM_HWTHREAD_IN_KERNEL
322	stb	r0, HSTATE_HWTHREAD_STATE(r13)
323	/* need to recheck hwthread_req after a barrier, to avoid race */
324	sync
325	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
326	cmpwi	r3, 0
327	bne	54f
328/*
329 * We jump to power7_wakeup_loss, which will return to the caller
330 * of power7_nap in the powernv cpu offline loop.  The value we
331 * put in r3 becomes the return value for power7_nap.
332 */
333	li	r3, LPCR_PECE0
334	mfspr	r4, SPRN_LPCR
335	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
336	mtspr	SPRN_LPCR, r4
337	li	r3, 0
338	b	power7_wakeup_loss
339
34053:	HMT_LOW
341	ld	r4, HSTATE_KVM_VCPU(r13)
342	cmpdi	r4, 0
343	beq	kvm_no_guest
344	HMT_MEDIUM
345	b	kvm_secondary_got_guest
346
34754:	li	r0, KVM_HWTHREAD_IN_KVM
348	stb	r0, HSTATE_HWTHREAD_STATE(r13)
349	b	kvm_no_guest
350
351/******************************************************************************
352 *                                                                            *
353 *                               Entry code                                   *
354 *                                                                            *
355 *****************************************************************************/
356
357.global kvmppc_hv_entry
358kvmppc_hv_entry:
359
360	/* Required state:
361	 *
362	 * R4 = vcpu pointer (or NULL)
363	 * MSR = ~IR|DR
364	 * R13 = PACA
365	 * R1 = host R1
366	 * R2 = TOC
367	 * all other volatile GPRS = free
368	 */
369	mflr	r0
370	std	r0, PPC_LR_STKOFF(r1)
371	stdu	r1, -112(r1)
372
373	/* Save R1 in the PACA */
374	std	r1, HSTATE_HOST_R1(r13)
375
376	li	r6, KVM_GUEST_MODE_HOST_HV
377	stb	r6, HSTATE_IN_GUEST(r13)
378
379	/* Clear out SLB */
380	li	r6,0
381	slbmte	r6,r6
382	slbia
383	ptesync
384
385	/*
386	 * POWER7/POWER8 host -> guest partition switch code.
387	 * We don't have to lock against concurrent tlbies,
388	 * but we do have to coordinate across hardware threads.
389	 */
390	/* Increment entry count iff exit count is zero. */
391	ld	r5,HSTATE_KVM_VCORE(r13)
392	addi	r9,r5,VCORE_ENTRY_EXIT
39321:	lwarx	r3,0,r9
394	cmpwi	r3,0x100		/* any threads starting to exit? */
395	bge	secondary_too_late	/* if so we're too late to the party */
396	addi	r3,r3,1
397	stwcx.	r3,0,r9
398	bne	21b
399
400	/* Primary thread switches to guest partition. */
401	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
402	lbz	r6,HSTATE_PTID(r13)
403	cmpwi	r6,0
404	bne	20f
405	ld	r6,KVM_SDR1(r9)
406	lwz	r7,KVM_LPID(r9)
407	li	r0,LPID_RSVD		/* switch to reserved LPID */
408	mtspr	SPRN_LPID,r0
409	ptesync
410	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
411	mtspr	SPRN_LPID,r7
412	isync
413
414	/* See if we need to flush the TLB */
415	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
416	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
417	srdi	r6,r6,6			/* doubleword number */
418	sldi	r6,r6,3			/* address offset */
419	add	r6,r6,r9
420	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
421	li	r0,1
422	sld	r0,r0,r7
423	ld	r7,0(r6)
424	and.	r7,r7,r0
425	beq	22f
42623:	ldarx	r7,0,r6			/* if set, clear the bit */
427	andc	r7,r7,r0
428	stdcx.	r7,0,r6
429	bne	23b
430	/* Flush the TLB of any entries for this LPID */
431	/* use arch 2.07S as a proxy for POWER8 */
432BEGIN_FTR_SECTION
433	li	r6,512			/* POWER8 has 512 sets */
434FTR_SECTION_ELSE
435	li	r6,128			/* POWER7 has 128 sets */
436ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
437	mtctr	r6
438	li	r7,0x800		/* IS field = 0b10 */
439	ptesync
44028:	tlbiel	r7
441	addi	r7,r7,0x1000
442	bdnz	28b
443	ptesync
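	/*
	 * Added sketch (not from the original source): the bit test, clear
	 * and tlbiel loop above are roughly
	 *	if (test_and_clear_bit(pcpu, kvm->arch.need_tlb_flush)) {
	 *		sets = cpu_has_feature(CPU_FTR_ARCH_207S) ? 512 : 128;
	 *		for (i = 0; i < sets; i++)
	 *			tlbiel(set i, IS = 0b10);   (flush this LPID)
	 *	}
	 * with pcpu taken from the PACA index.
	 */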
444
445	/* Add timebase offset onto timebase */
44622:	ld	r8,VCORE_TB_OFFSET(r5)
447	cmpdi	r8,0
448	beq	37f
449	mftb	r6		/* current host timebase */
450	add	r8,r8,r6
451	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
452	mftb	r7		/* check if lower 24 bits overflowed */
453	clrldi	r6,r6,40
454	clrldi	r7,r7,40
455	cmpld	r7,r6
456	bge	37f
457	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
458	mtspr	SPRN_TBU40,r8
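	/*
	 * Added note: only the upper 40 bits of the timebase are writable
	 * (via TBU40), so the sequence above writes (host_tb + vc->tb_offset)
	 * to TBU40 and then, if the low 24 bits wrapped between the mftb and
	 * the mtspr (so the carry into bit 24 was lost), adds 1 << 24 and
	 * writes TBU40 again.
	 */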
459
460	/* Load guest PCR value to select appropriate compat mode */
46137:	ld	r7, VCORE_PCR(r5)
462	cmpdi	r7, 0
463	beq	38f
464	mtspr	SPRN_PCR, r7
46538:
466
467BEGIN_FTR_SECTION
468	/* DPDES is shared between threads */
469	ld	r8, VCORE_DPDES(r5)
470	mtspr	SPRN_DPDES, r8
471END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
472
473	li	r0,1
474	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
475	b	10f
476
477	/* Secondary threads wait for primary to have done partition switch */
47820:	lbz	r0,VCORE_IN_GUEST(r5)
479	cmpwi	r0,0
480	beq	20b
481
482	/* Set LPCR and RMOR. */
48310:	ld	r8,VCORE_LPCR(r5)
484	mtspr	SPRN_LPCR,r8
485	ld	r8,KVM_RMOR(r9)
486	mtspr	SPRN_RMOR,r8
487	isync
488
489	/* Check if HDEC expires soon */
490	mfspr	r3,SPRN_HDEC
491	cmpwi	r3,512		/* 1 microsecond */
492	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
493	blt	hdec_soon
494
495	/* Do we have a guest vcpu to run? */
496	cmpdi	r4, 0
497	beq	kvmppc_primary_no_guest
498kvmppc_got_guest:
499
500	/* Load up guest SLB entries */
501	lwz	r5,VCPU_SLB_MAX(r4)
502	cmpwi	r5,0
503	beq	9f
504	mtctr	r5
505	addi	r6,r4,VCPU_SLB
5061:	ld	r8,VCPU_SLB_E(r6)
507	ld	r9,VCPU_SLB_V(r6)
508	slbmte	r9,r8
509	addi	r6,r6,VCPU_SLB_SIZE
510	bdnz	1b
5119:
512	/* Increment yield count if they have a VPA */
513	ld	r3, VCPU_VPA(r4)
514	cmpdi	r3, 0
515	beq	25f
516	li	r6, LPPACA_YIELDCOUNT
517	LWZX_BE	r5, r3, r6
518	addi	r5, r5, 1
519	STWX_BE	r5, r3, r6
520	li	r6, 1
521	stb	r6, VCPU_VPA_DIRTY(r4)
52225:
523
524	/* Save purr/spurr */
525	mfspr	r5,SPRN_PURR
526	mfspr	r6,SPRN_SPURR
527	std	r5,HSTATE_PURR(r13)
528	std	r6,HSTATE_SPURR(r13)
529	ld	r7,VCPU_PURR(r4)
530	ld	r8,VCPU_SPURR(r4)
531	mtspr	SPRN_PURR,r7
532	mtspr	SPRN_SPURR,r8
533
534BEGIN_FTR_SECTION
535	/* Set partition DABR */
536	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
537	lwz	r5,VCPU_DABRX(r4)
538	ld	r6,VCPU_DABR(r4)
539	mtspr	SPRN_DABRX,r5
540	mtspr	SPRN_DABR,r6
541	isync
542END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
543
544#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
545BEGIN_FTR_SECTION
546	b	skip_tm
547END_FTR_SECTION_IFCLR(CPU_FTR_TM)
548
549	/* Turn on TM/FP/VSX/VMX so we can restore them. */
550	mfmsr	r5
551	li	r6, MSR_TM >> 32
552	sldi	r6, r6, 32
553	or	r5, r5, r6
554	ori	r5, r5, MSR_FP
555	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
556	mtmsrd	r5
557
558	/*
559	 * The user may change these outside of a transaction, so they must
560	 * always be context switched.
561	 */
562	ld	r5, VCPU_TFHAR(r4)
563	ld	r6, VCPU_TFIAR(r4)
564	ld	r7, VCPU_TEXASR(r4)
565	mtspr	SPRN_TFHAR, r5
566	mtspr	SPRN_TFIAR, r6
567	mtspr	SPRN_TEXASR, r7
568
569	ld	r5, VCPU_MSR(r4)
570	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
571	beq	skip_tm	/* TM not active in guest */
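	/*
	 * Added note: the rldicl. above extracts the two MSR[TS] bits; a
	 * non-zero result means the guest was in transactional or suspended
	 * state, so the checkpointed register state below must be reloaded
	 * before the trechkpt.
	 */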
572
573	/* Make sure the failure summary is set, otherwise we'll program check
574	 * when we trechkpt.  It's possible that this might not have been set
575	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
576	 * host.
577	 */
578	oris	r7, r7, (TEXASR_FS)@h
579	mtspr	SPRN_TEXASR, r7
580
581	/*
582	 * We need to load up the checkpointed state for the guest.
583	 * We need to do this early as it will blow away any GPRs, VSRs and
584	 * some SPRs.
585	 */
586
587	mr	r31, r4
588	addi	r3, r31, VCPU_FPRS_TM
589	bl	load_fp_state
590	addi	r3, r31, VCPU_VRS_TM
591	bl	load_vr_state
592	mr	r4, r31
593	lwz	r7, VCPU_VRSAVE_TM(r4)
594	mtspr	SPRN_VRSAVE, r7
595
596	ld	r5, VCPU_LR_TM(r4)
597	lwz	r6, VCPU_CR_TM(r4)
598	ld	r7, VCPU_CTR_TM(r4)
599	ld	r8, VCPU_AMR_TM(r4)
600	ld	r9, VCPU_TAR_TM(r4)
601	mtlr	r5
602	mtcr	r6
603	mtctr	r7
604	mtspr	SPRN_AMR, r8
605	mtspr	SPRN_TAR, r9
606
607	/*
608	 * Load up PPR and DSCR values but don't put them in the actual SPRs
609	 * till the last moment to avoid running with userspace PPR and DSCR for
610	 * too long.
611	 */
612	ld	r29, VCPU_DSCR_TM(r4)
613	ld	r30, VCPU_PPR_TM(r4)
614
615	std	r2, PACATMSCRATCH(r13) /* Save TOC */
616
617	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
618	li	r5, 0
619	mtmsrd	r5, 1
620
621	/* Load GPRs r0-r28 */
622	reg = 0
623	.rept	29
624	ld	reg, VCPU_GPRS_TM(reg)(r31)
625	reg = reg + 1
626	.endr
627
628	mtspr	SPRN_DSCR, r29
629	mtspr	SPRN_PPR, r30
630
631	/* Load final GPRs */
632	ld	29, VCPU_GPRS_TM(29)(r31)
633	ld	30, VCPU_GPRS_TM(30)(r31)
634	ld	31, VCPU_GPRS_TM(31)(r31)
635
636	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
637	TRECHKPT
638
639	/* Now let's get back the state we need. */
640	HMT_MEDIUM
641	GET_PACA(r13)
642	ld	r29, HSTATE_DSCR(r13)
643	mtspr	SPRN_DSCR, r29
644	ld	r4, HSTATE_KVM_VCPU(r13)
645	ld	r1, HSTATE_HOST_R1(r13)
646	ld	r2, PACATMSCRATCH(r13)
647
648	/* Set the MSR RI since we have our registers back. */
649	li	r5, MSR_RI
650	mtmsrd	r5, 1
651skip_tm:
652#endif
653
654	/* Load guest PMU registers */
655	/* R4 is live here (vcpu pointer) */
656	li	r3, 1
657	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
658	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
659	isync
660BEGIN_FTR_SECTION
661	ld	r3, VCPU_MMCR(r4)
662	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
663	cmpwi	r5, MMCR0_PMAO
664	beql	kvmppc_fix_pmao
665END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
666	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
667	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
668	lwz	r6, VCPU_PMC + 8(r4)
669	lwz	r7, VCPU_PMC + 12(r4)
670	lwz	r8, VCPU_PMC + 16(r4)
671	lwz	r9, VCPU_PMC + 20(r4)
672	mtspr	SPRN_PMC1, r3
673	mtspr	SPRN_PMC2, r5
674	mtspr	SPRN_PMC3, r6
675	mtspr	SPRN_PMC4, r7
676	mtspr	SPRN_PMC5, r8
677	mtspr	SPRN_PMC6, r9
678	ld	r3, VCPU_MMCR(r4)
679	ld	r5, VCPU_MMCR + 8(r4)
680	ld	r6, VCPU_MMCR + 16(r4)
681	ld	r7, VCPU_SIAR(r4)
682	ld	r8, VCPU_SDAR(r4)
683	mtspr	SPRN_MMCR1, r5
684	mtspr	SPRN_MMCRA, r6
685	mtspr	SPRN_SIAR, r7
686	mtspr	SPRN_SDAR, r8
687BEGIN_FTR_SECTION
688	ld	r5, VCPU_MMCR + 24(r4)
689	ld	r6, VCPU_SIER(r4)
690	lwz	r7, VCPU_PMC + 24(r4)
691	lwz	r8, VCPU_PMC + 28(r4)
692	ld	r9, VCPU_MMCR + 32(r4)
693	mtspr	SPRN_MMCR2, r5
694	mtspr	SPRN_SIER, r6
695	mtspr	SPRN_SPMC1, r7
696	mtspr	SPRN_SPMC2, r8
697	mtspr	SPRN_MMCRS, r9
698END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
699	mtspr	SPRN_MMCR0, r3
700	isync
701
702	/* Load up FP, VMX and VSX registers */
703	bl	kvmppc_load_fp
704
705	ld	r14, VCPU_GPR(R14)(r4)
706	ld	r15, VCPU_GPR(R15)(r4)
707	ld	r16, VCPU_GPR(R16)(r4)
708	ld	r17, VCPU_GPR(R17)(r4)
709	ld	r18, VCPU_GPR(R18)(r4)
710	ld	r19, VCPU_GPR(R19)(r4)
711	ld	r20, VCPU_GPR(R20)(r4)
712	ld	r21, VCPU_GPR(R21)(r4)
713	ld	r22, VCPU_GPR(R22)(r4)
714	ld	r23, VCPU_GPR(R23)(r4)
715	ld	r24, VCPU_GPR(R24)(r4)
716	ld	r25, VCPU_GPR(R25)(r4)
717	ld	r26, VCPU_GPR(R26)(r4)
718	ld	r27, VCPU_GPR(R27)(r4)
719	ld	r28, VCPU_GPR(R28)(r4)
720	ld	r29, VCPU_GPR(R29)(r4)
721	ld	r30, VCPU_GPR(R30)(r4)
722	ld	r31, VCPU_GPR(R31)(r4)
723
724	/* Switch DSCR to guest value */
725	ld	r5, VCPU_DSCR(r4)
726	mtspr	SPRN_DSCR, r5
727
728BEGIN_FTR_SECTION
729	/* Skip next section on POWER7 */
730	b	8f
731END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
732	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
733	mfmsr	r8
734	li	r0, 1
735	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
736	mtmsrd	r8
737
738	/* Load up POWER8-specific registers */
739	ld	r5, VCPU_IAMR(r4)
740	lwz	r6, VCPU_PSPB(r4)
741	ld	r7, VCPU_FSCR(r4)
742	mtspr	SPRN_IAMR, r5
743	mtspr	SPRN_PSPB, r6
744	mtspr	SPRN_FSCR, r7
745	ld	r5, VCPU_DAWR(r4)
746	ld	r6, VCPU_DAWRX(r4)
747	ld	r7, VCPU_CIABR(r4)
748	ld	r8, VCPU_TAR(r4)
749	mtspr	SPRN_DAWR, r5
750	mtspr	SPRN_DAWRX, r6
751	mtspr	SPRN_CIABR, r7
752	mtspr	SPRN_TAR, r8
753	ld	r5, VCPU_IC(r4)
754	ld	r6, VCPU_VTB(r4)
755	mtspr	SPRN_IC, r5
756	mtspr	SPRN_VTB, r6
757	ld	r8, VCPU_EBBHR(r4)
758	mtspr	SPRN_EBBHR, r8
759	ld	r5, VCPU_EBBRR(r4)
760	ld	r6, VCPU_BESCR(r4)
761	ld	r7, VCPU_CSIGR(r4)
762	ld	r8, VCPU_TACR(r4)
763	mtspr	SPRN_EBBRR, r5
764	mtspr	SPRN_BESCR, r6
765	mtspr	SPRN_CSIGR, r7
766	mtspr	SPRN_TACR, r8
767	ld	r5, VCPU_TCSCR(r4)
768	ld	r6, VCPU_ACOP(r4)
769	lwz	r7, VCPU_GUEST_PID(r4)
770	ld	r8, VCPU_WORT(r4)
771	mtspr	SPRN_TCSCR, r5
772	mtspr	SPRN_ACOP, r6
773	mtspr	SPRN_PID, r7
774	mtspr	SPRN_WORT, r8
7758:
776
777	/*
778	 * Set the decrementer to the guest decrementer.
779	 */
780	ld	r8,VCPU_DEC_EXPIRES(r4)
781	/* r8 is a host timebase value here, convert to guest TB */
782	ld	r5,HSTATE_KVM_VCORE(r13)
783	ld	r6,VCORE_TB_OFFSET(r5)
784	add	r8,r8,r6
785	mftb	r7
786	subf	r3,r7,r8
787	mtspr	SPRN_DEC,r3
788	stw	r3,VCPU_DEC(r4)
789
790	ld	r5, VCPU_SPRG0(r4)
791	ld	r6, VCPU_SPRG1(r4)
792	ld	r7, VCPU_SPRG2(r4)
793	ld	r8, VCPU_SPRG3(r4)
794	mtspr	SPRN_SPRG0, r5
795	mtspr	SPRN_SPRG1, r6
796	mtspr	SPRN_SPRG2, r7
797	mtspr	SPRN_SPRG3, r8
798
799	/* Load up DAR and DSISR */
800	ld	r5, VCPU_DAR(r4)
801	lwz	r6, VCPU_DSISR(r4)
802	mtspr	SPRN_DAR, r5
803	mtspr	SPRN_DSISR, r6
804
805	/* Restore AMR and UAMOR, set AMOR to all 1s */
806	ld	r5,VCPU_AMR(r4)
807	ld	r6,VCPU_UAMOR(r4)
808	li	r7,-1
809	mtspr	SPRN_AMR,r5
810	mtspr	SPRN_UAMOR,r6
811	mtspr	SPRN_AMOR,r7
812
813	/* Restore state of CTRL run bit; assume 1 on entry */
814	lwz	r5,VCPU_CTRL(r4)
815	andi.	r5,r5,1
816	bne	4f
817	mfspr	r6,SPRN_CTRLF
818	clrrdi	r6,r6,1
819	mtspr	SPRN_CTRLT,r6
8204:
821	ld	r6, VCPU_CTR(r4)
822	lwz	r7, VCPU_XER(r4)
823
824	mtctr	r6
825	mtxer	r7
826
827kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
828	ld	r10, VCPU_PC(r4)
829	ld	r11, VCPU_MSR(r4)
830	ld	r6, VCPU_SRR0(r4)
831	ld	r7, VCPU_SRR1(r4)
832	mtspr	SPRN_SRR0, r6
833	mtspr	SPRN_SRR1, r7
834
835deliver_guest_interrupt:
836	/* r11 = vcpu->arch.msr & ~MSR_HV */
837	rldicl	r11, r11, 63 - MSR_HV_LG, 1
838	rotldi	r11, r11, 1 + MSR_HV_LG
839	ori	r11, r11, MSR_ME
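	/*
	 * Added note: the rotate pair above clears MSR_HV without needing a
	 * separate 64-bit mask register (rotate MSR so HV becomes the top
	 * bit, mask it off, rotate back), and the ori forces MSR_ME on so the
	 * guest always runs with machine check exceptions enabled.
	 */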
840
841	/* Check if we can deliver an external or decrementer interrupt now */
842	ld	r0, VCPU_PENDING_EXC(r4)
843	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
844	cmpdi	cr1, r0, 0
845	andi.	r8, r11, MSR_EE
846	mfspr	r8, SPRN_LPCR
847	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
848	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
849	mtspr	SPRN_LPCR, r8
850	isync
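	/*
	 * Added note: LPCR[MER] is set above whenever a level-triggered
	 * external interrupt is pending for the guest.  If the guest
	 * currently has MSR[EE] = 0 we cannot deliver it directly (the beq
	 * below skips delivery), but with MER set the hardware will raise an
	 * external interrupt as soon as the guest re-enables EE.
	 */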
851	beq	5f
852	li	r0, BOOK3S_INTERRUPT_EXTERNAL
853	bne	cr1, 12f
854	mfspr	r0, SPRN_DEC
855	cmpwi	r0, 0
856	li	r0, BOOK3S_INTERRUPT_DECREMENTER
857	bge	5f
858
85912:	mtspr	SPRN_SRR0, r10
860	mr	r10,r0
861	mtspr	SPRN_SRR1, r11
862	mr	r9, r4
863	bl	kvmppc_msr_interrupt
8645:
865
866/*
867 * Required state:
868 * R4 = vcpu
869 * R10: value for HSRR0
870 * R11: value for HSRR1
871 * R13 = PACA
872 */
873fast_guest_return:
874	li	r0,0
875	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
876	mtspr	SPRN_HSRR0,r10
877	mtspr	SPRN_HSRR1,r11
878
879	/* Activate guest mode, so faults get handled by KVM */
880	li	r9, KVM_GUEST_MODE_GUEST_HV
881	stb	r9, HSTATE_IN_GUEST(r13)
882
883	/* Enter guest */
884
885BEGIN_FTR_SECTION
886	ld	r5, VCPU_CFAR(r4)
887	mtspr	SPRN_CFAR, r5
888END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
889BEGIN_FTR_SECTION
890	ld	r0, VCPU_PPR(r4)
891END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
892
893	ld	r5, VCPU_LR(r4)
894	lwz	r6, VCPU_CR(r4)
895	mtlr	r5
896	mtcr	r6
897
898	ld	r1, VCPU_GPR(R1)(r4)
899	ld	r2, VCPU_GPR(R2)(r4)
900	ld	r3, VCPU_GPR(R3)(r4)
901	ld	r5, VCPU_GPR(R5)(r4)
902	ld	r6, VCPU_GPR(R6)(r4)
903	ld	r7, VCPU_GPR(R7)(r4)
904	ld	r8, VCPU_GPR(R8)(r4)
905	ld	r9, VCPU_GPR(R9)(r4)
906	ld	r10, VCPU_GPR(R10)(r4)
907	ld	r11, VCPU_GPR(R11)(r4)
908	ld	r12, VCPU_GPR(R12)(r4)
909	ld	r13, VCPU_GPR(R13)(r4)
910
911BEGIN_FTR_SECTION
912	mtspr	SPRN_PPR, r0
913END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
914	ld	r0, VCPU_GPR(R0)(r4)
915	ld	r4, VCPU_GPR(R4)(r4)
916
917	hrfid
918	b	.
919
920/******************************************************************************
921 *                                                                            *
922 *                               Exit code                                    *
923 *                                                                            *
924 *****************************************************************************/
925
926/*
927 * We come here from the first-level interrupt handlers.
928 */
929	.globl	kvmppc_interrupt_hv
930kvmppc_interrupt_hv:
931	/*
932	 * Register contents:
933	 * R12		= interrupt vector
934	 * R13		= PACA
935	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
936	 * guest R13 saved in SPRN_SCRATCH0
937	 */
938	std	r9, HSTATE_SCRATCH2(r13)
939
940	lbz	r9, HSTATE_IN_GUEST(r13)
941	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
942	beq	kvmppc_bad_host_intr
943#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
944	cmpwi	r9, KVM_GUEST_MODE_GUEST
945	ld	r9, HSTATE_SCRATCH2(r13)
946	beq	kvmppc_interrupt_pr
947#endif
948	/* We're now back in the host but in guest MMU context */
949	li	r9, KVM_GUEST_MODE_HOST_HV
950	stb	r9, HSTATE_IN_GUEST(r13)
951
952	ld	r9, HSTATE_KVM_VCPU(r13)
953
954	/* Save registers */
955
956	std	r0, VCPU_GPR(R0)(r9)
957	std	r1, VCPU_GPR(R1)(r9)
958	std	r2, VCPU_GPR(R2)(r9)
959	std	r3, VCPU_GPR(R3)(r9)
960	std	r4, VCPU_GPR(R4)(r9)
961	std	r5, VCPU_GPR(R5)(r9)
962	std	r6, VCPU_GPR(R6)(r9)
963	std	r7, VCPU_GPR(R7)(r9)
964	std	r8, VCPU_GPR(R8)(r9)
965	ld	r0, HSTATE_SCRATCH2(r13)
966	std	r0, VCPU_GPR(R9)(r9)
967	std	r10, VCPU_GPR(R10)(r9)
968	std	r11, VCPU_GPR(R11)(r9)
969	ld	r3, HSTATE_SCRATCH0(r13)
970	lwz	r4, HSTATE_SCRATCH1(r13)
971	std	r3, VCPU_GPR(R12)(r9)
972	stw	r4, VCPU_CR(r9)
973BEGIN_FTR_SECTION
974	ld	r3, HSTATE_CFAR(r13)
975	std	r3, VCPU_CFAR(r9)
976END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
977BEGIN_FTR_SECTION
978	ld	r4, HSTATE_PPR(r13)
979	std	r4, VCPU_PPR(r9)
980END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
981
982	/* Restore R1/R2 so we can handle faults */
983	ld	r1, HSTATE_HOST_R1(r13)
984	ld	r2, PACATOC(r13)
985
986	mfspr	r10, SPRN_SRR0
987	mfspr	r11, SPRN_SRR1
988	std	r10, VCPU_SRR0(r9)
989	std	r11, VCPU_SRR1(r9)
990	andi.	r0, r12, 2		/* need to read HSRR0/1? */
991	beq	1f
992	mfspr	r10, SPRN_HSRR0
993	mfspr	r11, SPRN_HSRR1
994	clrrdi	r12, r12, 2
9951:	std	r10, VCPU_PC(r9)
996	std	r11, VCPU_MSR(r9)
997
998	GET_SCRATCH0(r3)
999	mflr	r4
1000	std	r3, VCPU_GPR(R13)(r9)
1001	std	r4, VCPU_LR(r9)
1002
1003	stw	r12,VCPU_TRAP(r9)
1004
1005	/* Save HEIR (HV emulation assist reg) in emul_inst
1006	   if this is an HEI (HV emulation interrupt, e40) */
1007	li	r3,KVM_INST_FETCH_FAILED
1008	stw	r3,VCPU_LAST_INST(r9)
1009	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1010	bne	11f
1011	mfspr	r3,SPRN_HEIR
101211:	stw	r3,VCPU_HEIR(r9)
1013
1014	/* these are volatile across C function calls */
1015	mfctr	r3
1016	mfxer	r4
1017	std	r3, VCPU_CTR(r9)
1018	stw	r4, VCPU_XER(r9)
1019
1020	/* If this is a page table miss then see if it's theirs or ours */
1021	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1022	beq	kvmppc_hdsi
1023	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1024	beq	kvmppc_hisi
1025
1026	/* See if this is a leftover HDEC interrupt */
1027	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1028	bne	2f
1029	mfspr	r3,SPRN_HDEC
1030	cmpwi	r3,0
1031	bge	ignore_hdec
10322:
1033	/* See if this is an hcall we can handle in real mode */
1034	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
1035	beq	hcall_try_real_mode
1036
1037	/* External interrupt ? */
1038	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
1039	bne+	ext_interrupt_to_host
1040
1041	/* External interrupt, first check for host_ipi. If this is
1042	 * set, we know the host wants us out so let's do it now
1043	 */
1044	bl	kvmppc_read_intr
1045	cmpdi	r3, 0
1046	bgt	ext_interrupt_to_host
1047
1048	/* Check if any CPU is heading out to the host, if so head out too */
1049	ld	r5, HSTATE_KVM_VCORE(r13)
1050	lwz	r0, VCORE_ENTRY_EXIT(r5)
1051	cmpwi	r0, 0x100
1052	bge	ext_interrupt_to_host
1053
1054	/* Return to guest after delivering any pending interrupt */
1055	mr	r4, r9
1056	b	deliver_guest_interrupt
1057
1058ext_interrupt_to_host:
1059
1060guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
1061	/* Save more register state  */
1062	mfdar	r6
1063	mfdsisr	r7
1064	std	r6, VCPU_DAR(r9)
1065	stw	r7, VCPU_DSISR(r9)
1066	/* don't overwrite fault_dar/fault_dsisr if HDSI */
1067	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1068	beq	6f
1069	std	r6, VCPU_FAULT_DAR(r9)
1070	stw	r7, VCPU_FAULT_DSISR(r9)
1071
1072	/* See if it is a machine check */
1073	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1074	beq	machine_check_realmode
1075mc_cont:
1076
1077	/* Save guest CTRL register, set runlatch to 1 */
10786:	mfspr	r6,SPRN_CTRLF
1079	stw	r6,VCPU_CTRL(r9)
1080	andi.	r0,r6,1
1081	bne	4f
1082	ori	r6,r6,1
1083	mtspr	SPRN_CTRLT,r6
10844:
1085	/* Read the guest SLB and save it away */
1086	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
1087	mtctr	r0
1088	li	r6,0
1089	addi	r7,r9,VCPU_SLB
1090	li	r5,0
10911:	slbmfee	r8,r6
1092	andis.	r0,r8,SLB_ESID_V@h
1093	beq	2f
1094	add	r8,r8,r6		/* put index in */
1095	slbmfev	r3,r6
1096	std	r8,VCPU_SLB_E(r7)
1097	std	r3,VCPU_SLB_V(r7)
1098	addi	r7,r7,VCPU_SLB_SIZE
1099	addi	r5,r5,1
11002:	addi	r6,r6,1
1101	bdnz	1b
1102	stw	r5,VCPU_SLB_MAX(r9)
1103
1104	/*
1105	 * Save the guest PURR/SPURR
1106	 */
1107	mfspr	r5,SPRN_PURR
1108	mfspr	r6,SPRN_SPURR
1109	ld	r7,VCPU_PURR(r9)
1110	ld	r8,VCPU_SPURR(r9)
1111	std	r5,VCPU_PURR(r9)
1112	std	r6,VCPU_SPURR(r9)
1113	subf	r5,r7,r5
1114	subf	r6,r8,r6
1115
1116	/*
1117	 * Restore host PURR/SPURR and add guest times
1118	 * so that the time in the guest gets accounted.
1119	 */
1120	ld	r3,HSTATE_PURR(r13)
1121	ld	r4,HSTATE_SPURR(r13)
1122	add	r3,r3,r5
1123	add	r4,r4,r6
1124	mtspr	SPRN_PURR,r3
1125	mtspr	SPRN_SPURR,r4
1126
1127	/* Save DEC */
1128	mfspr	r5,SPRN_DEC
1129	mftb	r6
1130	extsw	r5,r5
1131	add	r5,r5,r6
1132	/* r5 is a guest timebase value here, convert to host TB */
1133	ld	r3,HSTATE_KVM_VCORE(r13)
1134	ld	r4,VCORE_TB_OFFSET(r3)
1135	subf	r5,r4,r5
1136	std	r5,VCPU_DEC_EXPIRES(r9)
1137
1138BEGIN_FTR_SECTION
1139	b	8f
1140END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1141	/* Save POWER8-specific registers */
1142	mfspr	r5, SPRN_IAMR
1143	mfspr	r6, SPRN_PSPB
1144	mfspr	r7, SPRN_FSCR
1145	std	r5, VCPU_IAMR(r9)
1146	stw	r6, VCPU_PSPB(r9)
1147	std	r7, VCPU_FSCR(r9)
1148	mfspr	r5, SPRN_IC
1149	mfspr	r6, SPRN_VTB
1150	mfspr	r7, SPRN_TAR
1151	std	r5, VCPU_IC(r9)
1152	std	r6, VCPU_VTB(r9)
1153	std	r7, VCPU_TAR(r9)
1154	mfspr	r8, SPRN_EBBHR
1155	std	r8, VCPU_EBBHR(r9)
1156	mfspr	r5, SPRN_EBBRR
1157	mfspr	r6, SPRN_BESCR
1158	mfspr	r7, SPRN_CSIGR
1159	mfspr	r8, SPRN_TACR
1160	std	r5, VCPU_EBBRR(r9)
1161	std	r6, VCPU_BESCR(r9)
1162	std	r7, VCPU_CSIGR(r9)
1163	std	r8, VCPU_TACR(r9)
1164	mfspr	r5, SPRN_TCSCR
1165	mfspr	r6, SPRN_ACOP
1166	mfspr	r7, SPRN_PID
1167	mfspr	r8, SPRN_WORT
1168	std	r5, VCPU_TCSCR(r9)
1169	std	r6, VCPU_ACOP(r9)
1170	stw	r7, VCPU_GUEST_PID(r9)
1171	std	r8, VCPU_WORT(r9)
11728:
1173
1174	/* Save and reset AMR and UAMOR before turning on the MMU */
1175	mfspr	r5,SPRN_AMR
1176	mfspr	r6,SPRN_UAMOR
1177	std	r5,VCPU_AMR(r9)
1178	std	r6,VCPU_UAMOR(r9)
1179	li	r6,0
1180	mtspr	SPRN_AMR,r6
1181
1182	/* Switch DSCR back to host value */
1183	mfspr	r8, SPRN_DSCR
1184	ld	r7, HSTATE_DSCR(r13)
1185	std	r8, VCPU_DSCR(r9)
1186	mtspr	SPRN_DSCR, r7
1187
1188	/* Save non-volatile GPRs */
1189	std	r14, VCPU_GPR(R14)(r9)
1190	std	r15, VCPU_GPR(R15)(r9)
1191	std	r16, VCPU_GPR(R16)(r9)
1192	std	r17, VCPU_GPR(R17)(r9)
1193	std	r18, VCPU_GPR(R18)(r9)
1194	std	r19, VCPU_GPR(R19)(r9)
1195	std	r20, VCPU_GPR(R20)(r9)
1196	std	r21, VCPU_GPR(R21)(r9)
1197	std	r22, VCPU_GPR(R22)(r9)
1198	std	r23, VCPU_GPR(R23)(r9)
1199	std	r24, VCPU_GPR(R24)(r9)
1200	std	r25, VCPU_GPR(R25)(r9)
1201	std	r26, VCPU_GPR(R26)(r9)
1202	std	r27, VCPU_GPR(R27)(r9)
1203	std	r28, VCPU_GPR(R28)(r9)
1204	std	r29, VCPU_GPR(R29)(r9)
1205	std	r30, VCPU_GPR(R30)(r9)
1206	std	r31, VCPU_GPR(R31)(r9)
1207
1208	/* Save SPRGs */
1209	mfspr	r3, SPRN_SPRG0
1210	mfspr	r4, SPRN_SPRG1
1211	mfspr	r5, SPRN_SPRG2
1212	mfspr	r6, SPRN_SPRG3
1213	std	r3, VCPU_SPRG0(r9)
1214	std	r4, VCPU_SPRG1(r9)
1215	std	r5, VCPU_SPRG2(r9)
1216	std	r6, VCPU_SPRG3(r9)
1217
1218	/* save FP state */
1219	mr	r3, r9
1220	bl	kvmppc_save_fp
1221
1222#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1223BEGIN_FTR_SECTION
1224	b	2f
1225END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1226	/* Turn on TM. */
1227	mfmsr	r8
1228	li	r0, 1
1229	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1230	mtmsrd	r8
1231
1232	ld	r5, VCPU_MSR(r9)
1233	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
1234	beq	1f	/* TM not active in guest. */
1235
1236	li	r3, TM_CAUSE_KVM_RESCHED
1237
1238	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
1239	li	r5, 0
1240	mtmsrd	r5, 1
1241
1242	/* All GPRs are volatile at this point. */
1243	TRECLAIM(R3)
1244
1245	/* Temporarily store r13 and r9 so we have some regs to play with */
1246	SET_SCRATCH0(r13)
1247	GET_PACA(r13)
1248	std	r9, PACATMSCRATCH(r13)
1249	ld	r9, HSTATE_KVM_VCPU(r13)
1250
1251	/* Get a few more GPRs free. */
1252	std	r29, VCPU_GPRS_TM(29)(r9)
1253	std	r30, VCPU_GPRS_TM(30)(r9)
1254	std	r31, VCPU_GPRS_TM(31)(r9)
1255
1256	/* Save away PPR and DSCR soon so we don't run with user values. */
1257	mfspr	r31, SPRN_PPR
1258	HMT_MEDIUM
1259	mfspr	r30, SPRN_DSCR
1260	ld	r29, HSTATE_DSCR(r13)
1261	mtspr	SPRN_DSCR, r29
1262
1263	/* Save all but r9, r13 & r29-r31 */
1264	reg = 0
1265	.rept	29
1266	.if (reg != 9) && (reg != 13)
1267	std	reg, VCPU_GPRS_TM(reg)(r9)
1268	.endif
1269	reg = reg + 1
1270	.endr
1271	/* ... now save r13 */
1272	GET_SCRATCH0(r4)
1273	std	r4, VCPU_GPRS_TM(13)(r9)
1274	/* ... and save r9 */
1275	ld	r4, PACATMSCRATCH(r13)
1276	std	r4, VCPU_GPRS_TM(9)(r9)
1277
1278	/* Reload stack pointer and TOC. */
1279	ld	r1, HSTATE_HOST_R1(r13)
1280	ld	r2, PACATOC(r13)
1281
1282	/* Set MSR RI now we have r1 and r13 back. */
1283	li	r5, MSR_RI
1284	mtmsrd	r5, 1
1285
1286	/* Save away checkpointed SPRs. */
1287	std	r31, VCPU_PPR_TM(r9)
1288	std	r30, VCPU_DSCR_TM(r9)
1289	mflr	r5
1290	mfcr	r6
1291	mfctr	r7
1292	mfspr	r8, SPRN_AMR
1293	mfspr	r10, SPRN_TAR
1294	std	r5, VCPU_LR_TM(r9)
1295	stw	r6, VCPU_CR_TM(r9)
1296	std	r7, VCPU_CTR_TM(r9)
1297	std	r8, VCPU_AMR_TM(r9)
1298	std	r10, VCPU_TAR_TM(r9)
1299
1300	/* Restore r12 as trap number. */
1301	lwz	r12, VCPU_TRAP(r9)
1302
1303	/* Save FP/VSX. */
1304	addi	r3, r9, VCPU_FPRS_TM
1305	bl	store_fp_state
1306	addi	r3, r9, VCPU_VRS_TM
1307	bl	store_vr_state
1308	mfspr	r6, SPRN_VRSAVE
1309	stw	r6, VCPU_VRSAVE_TM(r9)
13101:
1311	/*
1312	 * We need to save these SPRs after the treclaim so that the software
1313	 * error code is recorded correctly in the TEXASR.  Also the user may
1314	 * change these outside of a transaction, so they must always be
1315	 * context switched.
1316	 */
1317	mfspr	r5, SPRN_TFHAR
1318	mfspr	r6, SPRN_TFIAR
1319	mfspr	r7, SPRN_TEXASR
1320	std	r5, VCPU_TFHAR(r9)
1321	std	r6, VCPU_TFIAR(r9)
1322	std	r7, VCPU_TEXASR(r9)
13232:
1324#endif
1325
1326	/* Increment yield count if they have a VPA */
1327	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
1328	cmpdi	r8, 0
1329	beq	25f
1330	li	r4, LPPACA_YIELDCOUNT
1331	LWZX_BE	r3, r8, r4
1332	addi	r3, r3, 1
1333	STWX_BE	r3, r8, r4
1334	li	r3, 1
1335	stb	r3, VCPU_VPA_DIRTY(r9)
133625:
1337	/* Save PMU registers if requested */
1338	/* r8 and cr0.eq are live here */
1339BEGIN_FTR_SECTION
1340	/*
1341	 * POWER8 seems to have a hardware bug where setting
1342	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1343	 * when some counters are already negative doesn't seem
1344	 * to cause a performance monitor alert (and hence interrupt).
1345	 * The effect of this is that when saving the PMU state,
1346	 * if there is no PMU alert pending when we read MMCR0
1347	 * before freezing the counters, but one becomes pending
1348	 * before we read the counters, we lose it.
1349	 * To work around this, we need a way to freeze the counters
1350	 * before reading MMCR0.  Normally, freezing the counters
1351	 * is done by writing MMCR0 (to set MMCR0[FC]) which
1352	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
1353	 * we can also freeze the counters using MMCR2, by writing
1354	 * 1s to all the counter freeze condition bits (there are
1355	 * 9 bits each for 6 counters).
1356	 */
1357	li	r3, -1			/* set all freeze bits */
1358	clrrdi	r3, r3, 10
1359	mfspr	r10, SPRN_MMCR2
1360	mtspr	SPRN_MMCR2, r3
1361	isync
1362END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
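	/*
	 * Added note: the value written to MMCR2 in the section above is -1
	 * with the low 10 bits cleared, i.e. all 54 freeze-condition bits
	 * (9 bits for each of the 6 counters) set, which freezes the counters
	 * without touching MMCR0.
	 */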
1363	li	r3, 1
1364	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
1365	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
1366	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
1367	mfspr	r6, SPRN_MMCRA
1368	/* Clear MMCRA in order to disable SDAR updates */
1369	li	r7, 0
1370	mtspr	SPRN_MMCRA, r7
1371	isync
1372	beq	21f			/* if no VPA, save PMU stuff anyway */
1373	lbz	r7, LPPACA_PMCINUSE(r8)
1374	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
1375	bne	21f
1376	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
1377	b	22f
137821:	mfspr	r5, SPRN_MMCR1
1379	mfspr	r7, SPRN_SIAR
1380	mfspr	r8, SPRN_SDAR
1381	std	r4, VCPU_MMCR(r9)
1382	std	r5, VCPU_MMCR + 8(r9)
1383	std	r6, VCPU_MMCR + 16(r9)
1384BEGIN_FTR_SECTION
1385	std	r10, VCPU_MMCR + 24(r9)
1386END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1387	std	r7, VCPU_SIAR(r9)
1388	std	r8, VCPU_SDAR(r9)
1389	mfspr	r3, SPRN_PMC1
1390	mfspr	r4, SPRN_PMC2
1391	mfspr	r5, SPRN_PMC3
1392	mfspr	r6, SPRN_PMC4
1393	mfspr	r7, SPRN_PMC5
1394	mfspr	r8, SPRN_PMC6
1395	stw	r3, VCPU_PMC(r9)
1396	stw	r4, VCPU_PMC + 4(r9)
1397	stw	r5, VCPU_PMC + 8(r9)
1398	stw	r6, VCPU_PMC + 12(r9)
1399	stw	r7, VCPU_PMC + 16(r9)
1400	stw	r8, VCPU_PMC + 20(r9)
1401BEGIN_FTR_SECTION
1402	mfspr	r5, SPRN_SIER
1403	mfspr	r6, SPRN_SPMC1
1404	mfspr	r7, SPRN_SPMC2
1405	mfspr	r8, SPRN_MMCRS
1406	std	r5, VCPU_SIER(r9)
1407	stw	r6, VCPU_PMC + 24(r9)
1408	stw	r7, VCPU_PMC + 28(r9)
1409	std	r8, VCPU_MMCR + 32(r9)
1410	lis	r4, 0x8000
1411	mtspr	SPRN_MMCRS, r4
1412END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
141322:
1414	/* Clear out SLB */
1415	li	r5,0
1416	slbmte	r5,r5
1417	slbia
1418	ptesync
1419
1420hdec_soon:			/* r12 = trap, r13 = paca */
1421	/*
1422	 * POWER7/POWER8 guest -> host partition switch code.
1423	 * We don't have to lock against tlbies but we do
1424	 * have to coordinate the hardware threads.
1425	 */
1426	/* Increment the threads-exiting-guest count in the 0xff00
1427	   bits of vcore->entry_exit_count */
1428	ld	r5,HSTATE_KVM_VCORE(r13)
1429	addi	r6,r5,VCORE_ENTRY_EXIT
143041:	lwarx	r3,0,r6
1431	addi	r0,r3,0x100
1432	stwcx.	r0,0,r6
1433	bne	41b
1434	isync		/* order stwcx. vs. reading napping_threads */
1435
1436	/*
1437	 * At this point we have an interrupt that we have to pass
1438	 * up to the kernel or qemu; we can't handle it in real mode.
1439	 * Thus we have to do a partition switch, so we have to
1440	 * collect the other threads, if we are the first thread
1441	 * to take an interrupt.  To do this, we set the HDEC to 0,
1442	 * which causes an HDEC interrupt in all threads within 2ns
1443	 * because the HDEC register is shared between all 4 threads.
1444	 * However, we don't need to bother if this is an HDEC
1445	 * interrupt, since the other threads will already be on their
1446	 * way here in that case.
1447	 */
1448	cmpwi	r3,0x100	/* Are we the first here? */
1449	bge	43f
1450	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1451	beq	40f
1452	li	r0,0
1453	mtspr	SPRN_HDEC,r0
145440:
1455	/*
1456	 * Send an IPI to any napping threads, since an HDEC interrupt
1457	 * doesn't wake CPUs up from nap.
1458	 */
1459	lwz	r3,VCORE_NAPPING_THREADS(r5)
1460	lbz	r4,HSTATE_PTID(r13)
1461	li	r0,1
1462	sld	r0,r0,r4
1463	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
1464	beq	43f
1465	/* Order entry/exit update vs. IPIs */
1466	sync
1467	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
1468	subf	r6,r4,r13
146942:	andi.	r0,r3,1
1470	beq	44f
1471	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
1472	li	r0,IPI_PRIORITY
1473	li	r7,XICS_MFRR
1474	stbcix	r0,r7,r8		/* trigger the IPI */
147544:	srdi.	r3,r3,1
1476	addi	r6,r6,PACA_SIZE
1477	bne	42b
1478
1479secondary_too_late:
1480	/* Secondary threads wait for primary to do partition switch */
148143:	ld	r5,HSTATE_KVM_VCORE(r13)
1482	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
1483	lbz	r3,HSTATE_PTID(r13)
1484	cmpwi	r3,0
1485	beq	15f
1486	HMT_LOW
148713:	lbz	r3,VCORE_IN_GUEST(r5)
1488	cmpwi	r3,0
1489	bne	13b
1490	HMT_MEDIUM
1491	b	16f
1492
1493	/* Primary thread waits for all the secondaries to exit guest */
149415:	lwz	r3,VCORE_ENTRY_EXIT(r5)
1495	srwi	r0,r3,8
1496	clrldi	r3,r3,56
1497	cmpw	r3,r0
1498	bne	15b
1499	isync
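	/*
	 * Added note: the 15: loop above spins until the exit count (bits
	 * 15..8 of vcore->entry_exit_count) equals the entry count (bits
	 * 7..0), i.e. until every thread that entered the guest has exited.
	 */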
1500
1501	/* Primary thread switches back to host partition */
1502	ld	r6,KVM_HOST_SDR1(r4)
1503	lwz	r7,KVM_HOST_LPID(r4)
1504	li	r8,LPID_RSVD		/* switch to reserved LPID */
1505	mtspr	SPRN_LPID,r8
1506	ptesync
1507	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
1508	mtspr	SPRN_LPID,r7
1509	isync
1510
1511BEGIN_FTR_SECTION
1512	/* DPDES is shared between threads */
1513	mfspr	r7, SPRN_DPDES
1514	std	r7, VCORE_DPDES(r5)
1515	/* clear DPDES so we don't get guest doorbells in the host */
1516	li	r8, 0
1517	mtspr	SPRN_DPDES, r8
1518END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1519
1520	/* Subtract timebase offset from timebase */
1521	ld	r8,VCORE_TB_OFFSET(r5)
1522	cmpdi	r8,0
1523	beq	17f
1524	mftb	r6			/* current guest timebase */
1525	subf	r8,r8,r6
1526	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
1527	mftb	r7			/* check if lower 24 bits overflowed */
1528	clrldi	r6,r6,40
1529	clrldi	r7,r7,40
1530	cmpld	r7,r6
1531	bge	17f
1532	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
1533	mtspr	SPRN_TBU40,r8
1534
1535	/* Reset PCR */
153617:	ld	r0, VCORE_PCR(r5)
1537	cmpdi	r0, 0
1538	beq	18f
1539	li	r0, 0
1540	mtspr	SPRN_PCR, r0
154118:
1542	/* Signal secondary CPUs to continue */
1543	stb	r0,VCORE_IN_GUEST(r5)
1544	lis	r8,0x7fff		/* MAX_INT@h */
1545	mtspr	SPRN_HDEC,r8
1546
154716:	ld	r8,KVM_HOST_LPCR(r4)
1548	mtspr	SPRN_LPCR,r8
1549	isync
1550
1551	/* load host SLB entries */
1552	ld	r8,PACA_SLBSHADOWPTR(r13)
1553
1554	.rept	SLB_NUM_BOLTED
1555	li	r3, SLBSHADOW_SAVEAREA
1556	LDX_BE	r5, r8, r3
1557	addi	r3, r3, 8
1558	LDX_BE	r6, r8, r3
1559	andis.	r7,r5,SLB_ESID_V@h
1560	beq	1f
1561	slbmte	r6,r5
15621:	addi	r8,r8,16
1563	.endr
1564
1565	/* Unset guest mode */
1566	li	r0, KVM_GUEST_MODE_NONE
1567	stb	r0, HSTATE_IN_GUEST(r13)
1568
1569	ld	r0, 112+PPC_LR_STKOFF(r1)
1570	addi	r1, r1, 112
1571	mtlr	r0
1572	blr
1573
1574/*
1575 * Check whether an HDSI is an HPTE not found fault or something else.
1576 * If it is an HPTE not found fault that is due to the guest accessing
1577 * a page that they have mapped but which we have paged out, then
1578 * we continue on with the guest exit path.  In all other cases,
1579 * reflect the HDSI to the guest as a DSI.
1580 */
1581kvmppc_hdsi:
1582	mfspr	r4, SPRN_HDAR
1583	mfspr	r6, SPRN_HDSISR
1584	/* HPTE not found fault or protection fault? */
1585	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
1586	beq	1f			/* if not, send it to the guest */
1587	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
1588	beq	3f
1589	clrrdi	r0, r4, 28
1590	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
1591	bne	1f			/* if no SLB entry found */
15924:	std	r4, VCPU_FAULT_DAR(r9)
1593	stw	r6, VCPU_FAULT_DSISR(r9)
1594
1595	/* Search the hash table. */
1596	mr	r3, r9			/* vcpu pointer */
1597	li	r7, 1			/* data fault */
1598	bl	kvmppc_hpte_hv_fault
1599	ld	r9, HSTATE_KVM_VCPU(r13)
1600	ld	r10, VCPU_PC(r9)
1601	ld	r11, VCPU_MSR(r9)
1602	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1603	cmpdi	r3, 0			/* retry the instruction */
1604	beq	6f
1605	cmpdi	r3, -1			/* handle in kernel mode */
1606	beq	guest_exit_cont
1607	cmpdi	r3, -2			/* MMIO emulation; need instr word */
1608	beq	2f
1609
1610	/* Synthesize a DSI for the guest */
1611	ld	r4, VCPU_FAULT_DAR(r9)
1612	mr	r6, r3
16131:	mtspr	SPRN_DAR, r4
1614	mtspr	SPRN_DSISR, r6
1615	mtspr	SPRN_SRR0, r10
1616	mtspr	SPRN_SRR1, r11
1617	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
1618	bl	kvmppc_msr_interrupt
1619fast_interrupt_c_return:
16206:	ld	r7, VCPU_CTR(r9)
1621	lwz	r8, VCPU_XER(r9)
1622	mtctr	r7
1623	mtxer	r8
1624	mr	r4, r9
1625	b	fast_guest_return
1626
16273:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
1628	ld	r5, KVM_VRMA_SLB_V(r5)
1629	b	4b
1630
1631	/* If this is for emulated MMIO, load the instruction word */
16322:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */
1633
1634	/* Set guest mode to 'jump over instruction' so if lwz faults
1635	 * we'll just continue at the next IP. */
1636	li	r0, KVM_GUEST_MODE_SKIP
1637	stb	r0, HSTATE_IN_GUEST(r13)
1638
1639	/* Do the access with MSR:DR enabled */
1640	mfmsr	r3
1641	ori	r4, r3, MSR_DR		/* Enable paging for data */
1642	mtmsrd	r4
1643	lwz	r8, 0(r10)
1644	mtmsrd	r3
1645
1646	/* Store the result */
1647	stw	r8, VCPU_LAST_INST(r9)
1648
1649	/* Unset guest mode. */
1650	li	r0, KVM_GUEST_MODE_HOST_HV
1651	stb	r0, HSTATE_IN_GUEST(r13)
1652	b	guest_exit_cont
1653
1654/*
1655 * Similarly for an HISI, reflect it to the guest as an ISI unless
1656 * it is an HPTE not found fault for a page that we have paged out.
1657 */
1658kvmppc_hisi:
1659	andis.	r0, r11, SRR1_ISI_NOPT@h
1660	beq	1f
1661	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
1662	beq	3f
1663	clrrdi	r0, r10, 28
1664	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
1665	bne	1f			/* if no SLB entry found */
16664:
1667	/* Search the hash table. */
1668	mr	r3, r9			/* vcpu pointer */
1669	mr	r4, r10
1670	mr	r6, r11
1671	li	r7, 0			/* instruction fault */
1672	bl	kvmppc_hpte_hv_fault
1673	ld	r9, HSTATE_KVM_VCPU(r13)
1674	ld	r10, VCPU_PC(r9)
1675	ld	r11, VCPU_MSR(r9)
1676	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1677	cmpdi	r3, 0			/* retry the instruction */
1678	beq	fast_interrupt_c_return
1679	cmpdi	r3, -1			/* handle in kernel mode */
1680	beq	guest_exit_cont
1681
1682	/* Synthesize an ISI for the guest */
1683	mr	r11, r3
16841:	mtspr	SPRN_SRR0, r10
1685	mtspr	SPRN_SRR1, r11
1686	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
1687	bl	kvmppc_msr_interrupt
1688	b	fast_interrupt_c_return
1689
16903:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
1691	ld	r5, KVM_VRMA_SLB_V(r6)
1692	b	4b
1693
1694/*
1695 * Try to handle an hcall in real mode.
1696 * Returns to the guest if we handle it, or continues on up to
1697 * the kernel if we can't (i.e. if we don't have a handler for
1698 * it, or if the handler returns H_TOO_HARD).
1699 */
1700	.globl	hcall_try_real_mode
1701hcall_try_real_mode:
1702	ld	r3,VCPU_GPR(R3)(r9)
1703	andi.	r0,r11,MSR_PR
1704	/* sc 1 from userspace - reflect to guest syscall */
1705	bne	sc_1_fast_return
1706	clrrdi	r3,r3,2
1707	cmpldi	r3,hcall_real_table_end - hcall_real_table
1708	bge	guest_exit_cont
1709	/* See if this hcall is enabled for in-kernel handling */
1710	ld	r4, VCPU_KVM(r9)
1711	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
1712	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
1713	add	r4, r4, r0
1714	ld	r0, KVM_ENABLED_HCALLS(r4)
1715	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
1716	srd	r0, r0, r4
1717	andi.	r0, r0, 1
1718	beq	guest_exit_cont
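	/*
	 * Added sketch (not from the original source): the check above is
	 * roughly
	 *	if (!test_bit(req / 4, kvm->arch.enabled_hcalls))
	 *		goto guest_exit_cont;
	 * where req is the hcall number (always a multiple of 4).  The table
	 * lookup that follows uses req directly as a byte index, since each
	 * table entry is a 32-bit offset from hcall_real_table.
	 */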
1719	/* Get pointer to handler, if any, and call it */
1720	LOAD_REG_ADDR(r4, hcall_real_table)
1721	lwax	r3,r3,r4
1722	cmpwi	r3,0
1723	beq	guest_exit_cont
1724	add	r12,r3,r4
1725	mtctr	r12
1726	mr	r3,r9		/* get vcpu pointer */
1727	ld	r4,VCPU_GPR(R4)(r9)
1728	bctrl
1729	cmpdi	r3,H_TOO_HARD
1730	beq	hcall_real_fallback
1731	ld	r4,HSTATE_KVM_VCPU(r13)
1732	std	r3,VCPU_GPR(R3)(r4)
1733	ld	r10,VCPU_PC(r4)
1734	ld	r11,VCPU_MSR(r4)
1735	b	fast_guest_return
1736
1737sc_1_fast_return:
1738	mtspr	SPRN_SRR0,r10
1739	mtspr	SPRN_SRR1,r11
1740	li	r10, BOOK3S_INTERRUPT_SYSCALL
1741	bl	kvmppc_msr_interrupt
1742	mr	r4,r9
1743	b	fast_guest_return
1744
1745	/* We've attempted a real mode hcall, but the handler has punted it
1746	 * back to userspace.  We need to restore some clobbered volatiles
1747	 * before resuming the pass-it-to-qemu path */
1748hcall_real_fallback:
1749	li	r12,BOOK3S_INTERRUPT_SYSCALL
1750	ld	r9, HSTATE_KVM_VCPU(r13)
1751
1752	b	guest_exit_cont
1753
1754	.globl	hcall_real_table
1755hcall_real_table:
1756	.long	0		/* 0 - unused */
1757	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
1758	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
1759	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
1760	.long	0		/* 0x10 - H_CLEAR_MOD */
1761	.long	0		/* 0x14 - H_CLEAR_REF */
1762	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
1763	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
1764	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
1765	.long	0		/* 0x24 - H_SET_SPRG0 */
1766	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
1767	.long	0		/* 0x2c */
1768	.long	0		/* 0x30 */
1769	.long	0		/* 0x34 */
1770	.long	0		/* 0x38 */
1771	.long	0		/* 0x3c */
1772	.long	0		/* 0x40 */
1773	.long	0		/* 0x44 */
1774	.long	0		/* 0x48 */
1775	.long	0		/* 0x4c */
1776	.long	0		/* 0x50 */
1777	.long	0		/* 0x54 */
1778	.long	0		/* 0x58 */
1779	.long	0		/* 0x5c */
1780	.long	0		/* 0x60 */
1781#ifdef CONFIG_KVM_XICS
1782	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
1783	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
1784	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
1785	.long	0		/* 0x70 - H_IPOLL */
1786	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
1787#else
1788	.long	0		/* 0x64 - H_EOI */
1789	.long	0		/* 0x68 - H_CPPR */
1790	.long	0		/* 0x6c - H_IPI */
1791	.long	0		/* 0x70 - H_IPOLL */
1792	.long	0		/* 0x74 - H_XIRR */
1793#endif
1794	.long	0		/* 0x78 */
1795	.long	0		/* 0x7c */
1796	.long	0		/* 0x80 */
1797	.long	0		/* 0x84 */
1798	.long	0		/* 0x88 */
1799	.long	0		/* 0x8c */
1800	.long	0		/* 0x90 */
1801	.long	0		/* 0x94 */
1802	.long	0		/* 0x98 */
1803	.long	0		/* 0x9c */
1804	.long	0		/* 0xa0 */
1805	.long	0		/* 0xa4 */
1806	.long	0		/* 0xa8 */
1807	.long	0		/* 0xac */
1808	.long	0		/* 0xb0 */
1809	.long	0		/* 0xb4 */
1810	.long	0		/* 0xb8 */
1811	.long	0		/* 0xbc */
1812	.long	0		/* 0xc0 */
1813	.long	0		/* 0xc4 */
1814	.long	0		/* 0xc8 */
1815	.long	0		/* 0xcc */
1816	.long	0		/* 0xd0 */
1817	.long	0		/* 0xd4 */
1818	.long	0		/* 0xd8 */
1819	.long	0		/* 0xdc */
1820	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
1821	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
1822	.long	0		/* 0xe8 */
1823	.long	0		/* 0xec */
1824	.long	0		/* 0xf0 */
1825	.long	0		/* 0xf4 */
1826	.long	0		/* 0xf8 */
1827	.long	0		/* 0xfc */
1828	.long	0		/* 0x100 */
1829	.long	0		/* 0x104 */
1830	.long	0		/* 0x108 */
1831	.long	0		/* 0x10c */
1832	.long	0		/* 0x110 */
1833	.long	0		/* 0x114 */
1834	.long	0		/* 0x118 */
1835	.long	0		/* 0x11c */
1836	.long	0		/* 0x120 */
1837	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
1838	.long	0		/* 0x128 */
1839	.long	0		/* 0x12c */
1840	.long	0		/* 0x130 */
1841	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
1842	.globl	hcall_real_table_end
1843hcall_real_table_end:
1844
1845ignore_hdec:
1846	mr	r4,r9
1847	b	fast_guest_return
1848
1849_GLOBAL(kvmppc_h_set_xdabr)
1850	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
1851	beq	6f
1852	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
1853	andc.	r0, r5, r0
1854	beq	3f
18556:	li	r3, H_PARAMETER
1856	blr
1857
1858_GLOBAL(kvmppc_h_set_dabr)
1859	li	r5, DABRX_USER | DABRX_KERNEL
18603:
1861BEGIN_FTR_SECTION
1862	b	2f
1863END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1864	std	r4,VCPU_DABR(r3)
1865	stw	r5, VCPU_DABRX(r3)
1866	mtspr	SPRN_DABRX, r5
1867	/* Work around P7 bug where DABR can get corrupted on mtspr */
18681:	mtspr	SPRN_DABR,r4
1869	mfspr	r5, SPRN_DABR
1870	cmpd	r4, r5
1871	bne	1b
1872	isync
1873	li	r3,0
1874	blr
1875
1876	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
18772:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
1878	rlwimi	r5, r4, 1, DAWRX_WT
1879	clrrdi	r4, r4, 3
1880	std	r4, VCPU_DAWR(r3)
1881	std	r5, VCPU_DAWRX(r3)
1882	mtspr	SPRN_DAWR, r4
1883	mtspr	SPRN_DAWRX, r5
1884	li	r3, 0
1885	blr
1886
1887_GLOBAL(kvmppc_h_cede)
1888	ori	r11,r11,MSR_EE
1889	std	r11,VCPU_MSR(r3)
1890	li	r0,1
1891	stb	r0,VCPU_CEDED(r3)
1892	sync			/* order setting ceded vs. testing prodded */
1893	lbz	r5,VCPU_PRODDED(r3)
1894	cmpwi	r5,0
1895	bne	kvm_cede_prodded
1896	li	r0,0		/* set trap to 0 to say hcall is handled */
1897	stw	r0,VCPU_TRAP(r3)
1898	li	r0,H_SUCCESS
1899	std	r0,VCPU_GPR(R3)(r3)
1900
1901	/*
1902	 * Set our bit in the bitmask of napping threads unless all the
1903	 * other threads are already napping, in which case we send this
1904	 * up to the host.
1905	 */
1906	ld	r5,HSTATE_KVM_VCORE(r13)
1907	lbz	r6,HSTATE_PTID(r13)
1908	lwz	r8,VCORE_ENTRY_EXIT(r5)
1909	clrldi	r8,r8,56
1910	li	r0,1
1911	sld	r0,r0,r6
1912	addi	r6,r5,VCORE_NAPPING_THREADS
191331:	lwarx	r4,0,r6
1914	or	r4,r4,r0
1915	PPC_POPCNTW(R7,R4)
1916	cmpw	r7,r8
1917	bge	kvm_cede_exit
1918	stwcx.	r4,0,r6
1919	bne	31b
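	/*
	 * Added sketch (not from the original source): the 31: loop above is
	 * roughly
	 *	do {
	 *		new = vc->napping_threads | (1 << ptid);
	 *		if (hweight32(new) >= threads_entered)
	 *			goto kvm_cede_exit;   (we are the last active thread)
	 *	} while (!atomic_update(&vc->napping_threads, new));
	 * so the last runnable thread never naps; it passes the cede up to
	 * the host instead.
	 */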
1920	/* order napping_threads update vs testing entry_exit_count */
1921	isync
1922	li	r0,NAPPING_CEDE
1923	stb	r0,HSTATE_NAPPING(r13)
1924	lwz	r7,VCORE_ENTRY_EXIT(r5)
1925	cmpwi	r7,0x100
1926	bge	33f		/* another thread already exiting */
1927
1928/*
1929 * Although not specifically required by the architecture, POWER7
1930 * preserves the following registers in nap mode, even if an SMT mode
1931 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
1932 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
1933 */
1934	/* Save non-volatile GPRs */
1935	std	r14, VCPU_GPR(R14)(r3)
1936	std	r15, VCPU_GPR(R15)(r3)
1937	std	r16, VCPU_GPR(R16)(r3)
1938	std	r17, VCPU_GPR(R17)(r3)
1939	std	r18, VCPU_GPR(R18)(r3)
1940	std	r19, VCPU_GPR(R19)(r3)
1941	std	r20, VCPU_GPR(R20)(r3)
1942	std	r21, VCPU_GPR(R21)(r3)
1943	std	r22, VCPU_GPR(R22)(r3)
1944	std	r23, VCPU_GPR(R23)(r3)
1945	std	r24, VCPU_GPR(R24)(r3)
1946	std	r25, VCPU_GPR(R25)(r3)
1947	std	r26, VCPU_GPR(R26)(r3)
1948	std	r27, VCPU_GPR(R27)(r3)
1949	std	r28, VCPU_GPR(R28)(r3)
1950	std	r29, VCPU_GPR(R29)(r3)
1951	std	r30, VCPU_GPR(R30)(r3)
1952	std	r31, VCPU_GPR(R31)(r3)
1953
1954	/* save FP state */
1955	bl	kvmppc_save_fp
1956
1957	/*
1958	 * Take a nap until a decrementer or external or doorbell interrupt
1959	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
1960	 * runlatch bit before napping.
1961	 */
1962kvm_do_nap:
1963	mfspr	r2, SPRN_CTRLF
1964	clrrdi	r2, r2, 1
1965	mtspr	SPRN_CTRLT, r2
1966
1967	li	r0,1
1968	stb	r0,HSTATE_HWTHREAD_REQ(r13)
1969	mfspr	r5,SPRN_LPCR
1970	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
1971BEGIN_FTR_SECTION
1972	oris	r5,r5,LPCR_PECEDP@h
1973END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1974	mtspr	SPRN_LPCR,r5
1975	isync
1976	li	r0, 0
1977	std	r0, HSTATE_SCRATCH0(r13)
1978	ptesync
1979	ld	r0, HSTATE_SCRATCH0(r13)
19801:	cmpd	r0, r0
1981	bne	1b
1982	nap
1983	b	.
1984
198533:	mr	r4, r3
1986	li	r3, 0
1987	li	r12, 0
1988	b	34f
1989
1990kvm_end_cede:
1991	/* get vcpu pointer */
1992	ld	r4, HSTATE_KVM_VCPU(r13)
1993
1994	/* Woken by external or decrementer interrupt */
1995	ld	r1, HSTATE_HOST_R1(r13)
1996
1997	/* load up FP state */
1998	bl	kvmppc_load_fp
1999
2000	/* Load NV GPRS */
2001	ld	r14, VCPU_GPR(R14)(r4)
2002	ld	r15, VCPU_GPR(R15)(r4)
2003	ld	r16, VCPU_GPR(R16)(r4)
2004	ld	r17, VCPU_GPR(R17)(r4)
2005	ld	r18, VCPU_GPR(R18)(r4)
2006	ld	r19, VCPU_GPR(R19)(r4)
2007	ld	r20, VCPU_GPR(R20)(r4)
2008	ld	r21, VCPU_GPR(R21)(r4)
2009	ld	r22, VCPU_GPR(R22)(r4)
2010	ld	r23, VCPU_GPR(R23)(r4)
2011	ld	r24, VCPU_GPR(R24)(r4)
2012	ld	r25, VCPU_GPR(R25)(r4)
2013	ld	r26, VCPU_GPR(R26)(r4)
2014	ld	r27, VCPU_GPR(R27)(r4)
2015	ld	r28, VCPU_GPR(R28)(r4)
2016	ld	r29, VCPU_GPR(R29)(r4)
2017	ld	r30, VCPU_GPR(R30)(r4)
2018	ld	r31, VCPU_GPR(R31)(r4)
2019
2020	/* Check the wake reason in SRR1 to see why we got here */
2021	bl	kvmppc_check_wake_reason
2022
2023	/* clear our bit in vcore->napping_threads */
202434:	ld	r5,HSTATE_KVM_VCORE(r13)
2025	lbz	r7,HSTATE_PTID(r13)
2026	li	r0,1
2027	sld	r0,r0,r7
2028	addi	r6,r5,VCORE_NAPPING_THREADS
202932:	lwarx	r7,0,r6
2030	andc	r7,r7,r0
2031	stwcx.	r7,0,r6
2032	bne	32b
2033	li	r0,0
2034	stb	r0,HSTATE_NAPPING(r13)
2035
2036	/* See if the wake reason means we need to exit */
2037	stw	r12, VCPU_TRAP(r4)
2038	mr	r9, r4
2039	cmpdi	r3, 0
2040	bgt	guest_exit_cont
2041
2042	/* see if any other thread is already exiting */
2043	lwz	r0,VCORE_ENTRY_EXIT(r5)
2044	cmpwi	r0,0x100
2045	bge	guest_exit_cont
2046
2047	b	kvmppc_cede_reentry	/* if not go back to guest */
2048
2049	/* cede when already previously prodded case */
2050kvm_cede_prodded:
2051	li	r0,0
2052	stb	r0,VCPU_PRODDED(r3)
2053	sync			/* order testing prodded vs. clearing ceded */
2054	stb	r0,VCPU_CEDED(r3)
2055	li	r3,H_SUCCESS
2056	blr
2057
2058	/* we've ceded but we want to give control to the host */
2059kvm_cede_exit:
2060	b	hcall_real_fallback
2061
2062	/* Try to handle a machine check in real mode */
2063machine_check_realmode:
2064	mr	r3, r9		/* get vcpu pointer */
2065	bl	kvmppc_realmode_machine_check
2066	nop
2067	cmpdi	r3, 0		/* Did we handle MCE ? */
2068	ld	r9, HSTATE_KVM_VCPU(r13)
2069	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2070	/*
2071	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through a
2072	 * machine check interrupt (set HSRR0 to 0x200).  For handled (non-fatal)
2073	 * errors, just go back to guest execution with the current HSRR0
2074	 * instead of exiting the guest.  This approach injects a machine check
2075	 * into the guest for fatal errors, causing the guest to crash.
2076	 *
2077	 * The old code returned to the host for unhandled errors, which caused
2078	 * the guest to hang with soft lockups inside the guest and made it
2079	 * difficult to recover the guest instance.
2080	 */
2081	ld	r10, VCPU_PC(r9)
2082	ld	r11, VCPU_MSR(r9)
2083	bne	2f	/* Continue guest execution. */
2084	/* If not, deliver a machine check.  SRR0/1 are already set */
2085	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
2086	ld	r11, VCPU_MSR(r9)
2087	bl	kvmppc_msr_interrupt
20882:	b	fast_interrupt_c_return
2089
2090/*
2091 * Check the reason we woke from nap, and take appropriate action.
2092 * Returns:
2093 *	0 if nothing needs to be done
2094 *	1 if something happened that needs to be handled by the host
2095 *	-1 if there was a guest wakeup (IPI)
2096 *
2097 * Also sets r12 to the interrupt vector for any interrupt that needs
2098 * to be handled now by the host (0x500 for external interrupt), or zero.
2099 */
2100kvmppc_check_wake_reason:
2101	mfspr	r6, SPRN_SRR1
2102BEGIN_FTR_SECTION
2103	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
2104FTR_SECTION_ELSE
2105	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
2106ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2107	cmpwi	r6, 8			/* was it an external interrupt? */
2108	li	r12, BOOK3S_INTERRUPT_EXTERNAL
2109	beq	kvmppc_read_intr	/* if so, see what it was */
2110	li	r3, 0
2111	li	r12, 0
2112	cmpwi	r6, 6			/* was it the decrementer? */
2113	beq	0f
2114BEGIN_FTR_SECTION
2115	cmpwi	r6, 5			/* privileged doorbell? */
2116	beq	0f
2117	cmpwi	r6, 3			/* hypervisor doorbell? */
2118	beq	3f
2119END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2120	li	r3, 1			/* anything else, return 1 */
21210:	blr
2122
2123	/* hypervisor doorbell */
21243:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
2125	li	r3, 1
2126	blr
2127
2128/*
2129 * Determine what sort of external interrupt is pending (if any).
2130 * Returns:
2131 *	0 if no interrupt is pending
2132 *	1 if an interrupt is pending that needs to be handled by the host
2133 *	-1 if there was a guest wakeup IPI (which has now been cleared)
2134 */
2135kvmppc_read_intr:
2136	/* see if a host IPI is pending */
2137	li	r3, 1
2138	lbz	r0, HSTATE_HOST_IPI(r13)
2139	cmpwi	r0, 0
2140	bne	1f
2141
2142	/* Now read the interrupt from the ICP */
2143	ld	r6, HSTATE_XICS_PHYS(r13)
2144	li	r7, XICS_XIRR
2145	cmpdi	r6, 0
2146	beq-	1f
2147	lwzcix	r0, r6, r7
2148	/*
2149	 * Save XIRR for later. Since we get it in reverse endian on LE
2150	 * systems, save it byte reversed and fetch it back in host endian.
2151	 */
2152	li	r3, HSTATE_SAVED_XIRR
2153	STWX_BE	r0, r3, r13
2154#ifdef __LITTLE_ENDIAN__
2155	lwz	r3, HSTATE_SAVED_XIRR(r13)
2156#else
2157	mr	r3, r0
2158#endif
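	/*
	 * The low 24 bits of the XIRR are the XISR (interrupt source
	 * number); zero means nothing is pending in the ICP.
	 */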
2159	rlwinm.	r3, r3, 0, 0xffffff
2160	sync
2161	beq	1f			/* if nothing pending in the ICP */
2162
2163	/* We found something in the ICP...
2164	 *
2165	 * If it's not an IPI, stash it in the PACA and return to
2166	 * the host; we don't (yet) handle directing real external
2167	 * interrupts directly to the guest.
2168	 */
2169	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
2170	bne	42f
2171
2172	/* It's an IPI, clear the MFRR and EOI it */
2173	li	r3, 0xff
2174	li	r8, XICS_MFRR
2175	stbcix	r3, r6, r8		/* clear the IPI */
2176	stwcix	r0, r6, r7		/* EOI it */
2177	sync
2178
2179	/* We need to re-check the host IPI now in case it got set in the
2180	 * meantime. If it's clear, we bounce the interrupt to the
2181	 * guest.
2182	 */
2183	lbz	r0, HSTATE_HOST_IPI(r13)
2184	cmpwi	r0, 0
2185	bne-	43f
2186
2187	/* OK, it's an IPI for us */
2188	li	r3, -1
21891:	blr
2190
219142:	/* It's not an IPI and it's for the host. We saved a copy of XIRR in
2192	 * the PACA earlier; it will be picked up by the host ICP driver.
2193	 */
2194	li	r3, 1
2195	b	1b
2196
219743:	/* We raced with the host; we need to resend that IPI, bummer */
2198	li	r0, IPI_PRIORITY
2199	stbcix	r0, r6, r8		/* set the IPI */
2200	sync
2201	li	r3, 1
2202	b	1b
2203
2204/*
2205 * Save away FP, VMX and VSX registers.
2206 * r3 = vcpu pointer
2207 * N.B. r30 and r31 are volatile across this function,
2208 * thus it is not callable from C.
2209 */
2210kvmppc_save_fp:
2211	mflr	r30
2212	mr	r31,r3
2213	mfmsr	r5
2214	ori	r8,r5,MSR_FP
2215#ifdef CONFIG_ALTIVEC
2216BEGIN_FTR_SECTION
2217	oris	r8,r8,MSR_VEC@h
2218END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2219#endif
2220#ifdef CONFIG_VSX
2221BEGIN_FTR_SECTION
2222	oris	r8,r8,MSR_VSX@h
2223END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2224#endif
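	/* enable FP (and VMX/VSX where available) so the state is accessible */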
2225	mtmsrd	r8
2226	addi	r3,r3,VCPU_FPRS
2227	bl	store_fp_state
2228#ifdef CONFIG_ALTIVEC
2229BEGIN_FTR_SECTION
2230	addi	r3,r31,VCPU_VRS
2231	bl	store_vr_state
2232END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2233#endif
2234	mfspr	r6,SPRN_VRSAVE
2235	stw	r6,VCPU_VRSAVE(r31)
2236	mtlr	r30
2237	blr
2238
2239/*
2240 * Load up FP, VMX and VSX registers
2241 * r4 = vcpu pointer
2242 * N.B. r30 and r31 are volatile across this function,
2243 * thus it is not callable from C.
2244 */
2245kvmppc_load_fp:
2246	mflr	r30
2247	mr	r31,r4
2248	mfmsr	r9
2249	ori	r8,r9,MSR_FP
2250#ifdef CONFIG_ALTIVEC
2251BEGIN_FTR_SECTION
2252	oris	r8,r8,MSR_VEC@h
2253END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2254#endif
2255#ifdef CONFIG_VSX
2256BEGIN_FTR_SECTION
2257	oris	r8,r8,MSR_VSX@h
2258END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2259#endif
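	/* enable FP (and VMX/VSX where available) so the state can be restored */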
2260	mtmsrd	r8
2261	addi	r3,r4,VCPU_FPRS
2262	bl	load_fp_state
2263#ifdef CONFIG_ALTIVEC
2264BEGIN_FTR_SECTION
2265	addi	r3,r31,VCPU_VRS
2266	bl	load_vr_state
2267END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2268#endif
2269	lwz	r7,VCPU_VRSAVE(r31)
2270	mtspr	SPRN_VRSAVE,r7
2271	mtlr	r30
2272	mr	r4,r31
2273	blr
2274
2275/*
2276 * We come here if we get any exception or interrupt while we are
2277 * executing host real mode code while in guest MMU context.
2278 * For now just spin, but we should do something better.
2279 */
2280kvmppc_bad_host_intr:
2281	b	.
2282
2283/*
2284 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
2285 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
2286 *   r11 has the guest MSR value (in/out)
2287 *   r9 has a vcpu pointer (in)
2288 *   r0 is used as a scratch register
2289 */
2290kvmppc_msr_interrupt:
2291	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
2292	cmpwi	r0, 2 /* Check if we are in transactional state..  */
2293	ld	r11, VCPU_INTR_MSR(r9)
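	/*
	 * If not transactional, r0 still holds the guest's original TS bits,
	 * which the rldimi below inserts into the new MSR unchanged (so a
	 * suspended guest stays suspended).
	 */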
2294	bne	1f
2295	/* ... if transactional, change to suspended */
2296	li	r0, 1
22971:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
2298	blr
2299
2300/*
2301 * This works around a hardware bug on POWER8E processors, where
2302 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
2303 * performance monitor interrupt.  Instead, when we need to have
2304 * an interrupt pending, we have to arrange for a counter to overflow.
2305 */
2306kvmppc_fix_pmao:
2307	li	r3, 0
2308	mtspr	SPRN_MMCR2, r3
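	/* clear MMCR2, presumably so no per-counter freeze bits stop PMC6 */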
2309	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
2310	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
2311	mtspr	SPRN_MMCR0, r3
2312	lis	r3, 0x7fff
2313	ori	r3, r3, 0xffff
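	/*
	 * Set PMC6 one count below the overflow threshold (0x80000000), so
	 * the next event it counts raises the overflow condition and, with
	 * PMXE set above, leaves a performance monitor interrupt pending.
	 */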
2314	mtspr	SPRN_PMC6, r3
2315	isync
2316	blr
2317