// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915
#define OP_31_XOP_SLBFEE	979

#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910

#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

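/*
 * Gekko/Broadway graphics quantization registers, used by the
 * paired-single load/store emulation.
 */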
#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

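/* Minimum privilege level a guest needs in order to access a given SPR */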
enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
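/*
 * Helpers that copy the vcpu's live register state into the checkpointed
 * (_tm) copies and back again, mirroring what the hardware does when a
 * transaction is checkpointed or reclaimed.
 */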
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;
	uint64_t texasr;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	tm_enable();
	texasr = mfspr(SPRN_TEXASR);
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	/* failure recording depends on Failure Summary bit */
	if (!(texasr & TEXASR_FS)) {
		texasr &= ~TEXASR_FC;
		texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;

		texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			texasr |= TEXASR_HV;

		vcpu->arch.texasr = texasr;
		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim needs to leave the CPU in non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);
	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}

static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * We need to flush FP/VEC/VSX to the vcpu save area before
	 * copying.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * As a result of trecheckpoint, set TS to suspended.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}

/* Emulate tabort. issued in guest privileged state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/* Currently we only emulate the plain tabort. variant; the other
	 * tabort variants are not emulated since the kernel does not use
	 * them at present.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	uint64_t org_texasr;

	preempt_disable();
	tm_enable();
	org_texasr = mfspr(SPRN_TEXASR);
	tm_abort(ra_val);

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* Failure recording depends on the Failure Summary bit, and
	 * tabort is treated as a nop in non-transactional state.
	 */
	if (!(org_texasr & TEXASR_FS) &&
			MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
	}
	tm_disable();
	preempt_enable();
}

#endif

int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte-reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up
			 * executing illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * Follow the ISA rules for TM state transitions: with
			 * TM disabled and the thread in suspended state, a
			 * transition to the TM inactive (00) state must be
			 * suppressed, so keep TS suspended in that case.
			 */
			if (((cur_msr & MSR_TM) == 0) &&
				((srr1 & MSR_TM) == 0) &&
				MSR_TM_SUSPENDED(cur_msr) &&
				!MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, srr1);
			*advance = 0;
			break;
		}

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
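			/* With the L bit set, mtmsrd only updates MSR[EE] and MSR[RI] */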
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

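			/* Not handled in-kernel: forward the hypercall and its arguments to userspace */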
			vcpu->run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				vcpu->run->papr_hcall.args[i] = gpr;
			}

			vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
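		/* slbfee is only valid in its record form (slbfee., Rc = 1) */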
		case OP_31_XOP_SLBFEE:
			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
				return EMULATE_FAIL;
			} else {
				ulong b, t;
				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;

				b = kvmppc_get_gpr(vcpu, rb);
				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
					cr |= 2 << CR0_SHIFT;
				kvmppc_set_gpr(vcpu, rt, t);
				/* copy XER[SO] bit to CR0[SO] */
				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
					(31 - CR0_SHIFT);
				kvmppc_set_cr(vcpu, cr);
			}
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

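			/*
			 * Emulate dcbz with a 32-byte cache block: align the
			 * effective address to 32 bytes (truncating it to
			 * 32 bits when MSR[SF] is clear) and store 32 zero
			 * bytes through the guest MMU.
			 */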
			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
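			/* On a translation or protection fault, synthesize a data storage interrupt for the guest */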
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

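			/*
			 * For a privileged guest, make tbegin. appear to fail
			 * immediately: report the failure in CR0 and record a
			 * persistent emulation cause in TEXASR, roughly as the
			 * hardware would for a failed transaction.
			 */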
			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* Only emulate for a privileged guest: a problem state
			 * guest can run with TM enabled, so we don't expect to
			 * trap here in that case.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Intr */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
				!(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(vcpu);

	return emulated;
}

void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
                    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

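	/*
	 * The BAT SPRs come in upper/lower pairs; dividing the SPR offset by
	 * two yields the index into the IBAT/DBAT array.
	 */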
	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
			!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
					(sprn == SPRN_TFHAR))) {
			/* It is illegal to mtspr() the TM registers in
			 * anything other than non-transactional state, with
			 * the exception of TFHAR in suspended state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_UAMOR:
	case SPRN_IAMR:
	case SPRN_AMR:
#endif
		break;
unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
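		/*
		 * SPR numbers with bit 0x10 set are privileged: touching one
		 * from problem state raises a privileged-instruction program
		 * check. Otherwise raise an illegal-instruction program check
		 * only in the cases a real CPU would also trap on.
		 */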
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}

int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
	case SPRN_UAMOR:
	case SPRN_IAMR:
	case SPRN_AMR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

		break;
	}

	return emulated;
}

u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can too.
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

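	/*
	 * Recompute the effective address: D-form FP loads/stores add a
	 * sign-extended 16-bit displacement, X-form (opcode 31) ops add
	 * GPR[rb].
	 */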
	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}