xref: /linux/arch/powerpc/kvm/emulate.c (revision ce7240e445303de3ca66e6d08f17a2ec278a5bf6)
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
14  *
15  * Copyright IBM Corp. 2007
16  * Copyright 2011 Freescale Semiconductor, Inc.
17  *
18  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19  */
20 
21 #include <linux/jiffies.h>
22 #include <linux/hrtimer.h>
23 #include <linux/types.h>
24 #include <linux/string.h>
25 #include <linux/kvm_host.h>
26 #include <linux/clockchips.h>
27 
28 #include <asm/reg.h>
29 #include <asm/time.h>
30 #include <asm/byteorder.h>
31 #include <asm/kvm_ppc.h>
32 #include <asm/disassemble.h>
33 #include "timing.h"
34 #include "trace.h"
35 
36 #define OP_TRAP 3
37 #define OP_TRAP_64 2
38 
39 #define OP_31_XOP_TRAP      4
40 #define OP_31_XOP_LWZX      23
41 #define OP_31_XOP_TRAP_64   68
42 #define OP_31_XOP_LBZX      87
43 #define OP_31_XOP_STWX      151
44 #define OP_31_XOP_STBX      215
45 #define OP_31_XOP_LBZUX     119
46 #define OP_31_XOP_STBUX     247
47 #define OP_31_XOP_LHZX      279
48 #define OP_31_XOP_LHZUX     311
49 #define OP_31_XOP_MFSPR     339
50 #define OP_31_XOP_LHAX      343
51 #define OP_31_XOP_STHX      407
52 #define OP_31_XOP_STHUX     439
53 #define OP_31_XOP_MTSPR     467
54 #define OP_31_XOP_DCBI      470
55 #define OP_31_XOP_LWBRX     534
56 #define OP_31_XOP_TLBSYNC   566
57 #define OP_31_XOP_STWBRX    662
58 #define OP_31_XOP_LHBRX     790
59 #define OP_31_XOP_STHBRX    918
60 
61 #define OP_LWZ  32
62 #define OP_LWZU 33
63 #define OP_LBZ  34
64 #define OP_LBZU 35
65 #define OP_STW  36
66 #define OP_STWU 37
67 #define OP_STB  38
68 #define OP_STBU 39
69 #define OP_LHZ  40
70 #define OP_LHZU 41
71 #define OP_LHA  42
72 #define OP_LHAU 43
73 #define OP_STH  44
74 #define OP_STHU 45
75 
/*
 * Emulate a guest write to the decrementer (mtDEC).
 *
 * Cancels any pending emulated-DEC hrtimer, applies the core-family
 * semantics of the newly written value, and (re)arms a host hrtimer to
 * fire when the guest decrementer would reach zero.  The guest timebase
 * at programming time is snapshotted in arch.dec_jiffies so that
 * kvmppc_get_dec() can later reconstruct the current DEC reading.
 */
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/*
	 * Guest timebase ticks at the same frequency as host decrementer.
	 * So use the host decrementer calculations for decrementer emulation.
	 * (shift/mult here invert the clockevent's ns->ticks conversion,
	 * turning timebase ticks into nanoseconds.)
	 */
	dec_time = dec_time << decrementer_clockevent.shift;
	do_div(dec_time, decrementer_clockevent.mult);
	/*
	 * Split the nanosecond count into whole seconds + remainder ns for
	 * ktime_set(): do_div() leaves the quotient in dec_time and returns
	 * the remainder.
	 */
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	/*
	 * Snapshot the timebase at programming time.  NOTE: despite the
	 * field name, this stores a timebase value, not jiffies.
	 */
	vcpu->arch.dec_jiffies = get_tb();
}
119 
120 u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
121 {
122 	u64 jd = tb - vcpu->arch.dec_jiffies;
123 
124 #ifdef CONFIG_BOOKE
125 	if (vcpu->arch.dec < jd)
126 		return 0;
127 #endif
128 
129 	return vcpu->arch.dec - jd;
130 }
131 
132 /* XXX to do:
133  * lhax
134  * lhaux
135  * lswx
136  * lswi
137  * stswx
138  * stswi
139  * lha
140  * lhau
141  * lmw
142  * stmw
143  *
144  * XXX is_bigendian should depend on MMU mapping or MSR[LE]
145  */
146 /* XXX Should probably auto-generate instruction decoding for a particular core
147  * from opcode tables in the future. */
/*
 * Emulate one guest instruction that trapped into the hypervisor.
 *
 * Decodes the last instruction the guest attempted and handles the
 * generic subset here: traps, the common load/store forms (including
 * update and byte-reversed variants), and mfspr/mtspr for the SPRs
 * shared across cores.  Anything unrecognized falls through to the
 * core-specific hook kvmppc_core_emulate_op().
 *
 * Returns an emulation_result code.  Unless the instruction itself
 * raised an interrupt (trap) or emulation failed/needs a retry, the
 * guest PC is advanced past the 4-byte instruction before returning.
 */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;
	ulong spr_val = 0;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		/* BookE reports a trap via the ESR[PTR] bit instead. */
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		/* The trap delivers an interrupt; don't step past it. */
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			/* Update form: write the effective address back to rA. */
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			/* Update form: write the effective address back to rA. */
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			/* lhax is sign-extending, hence handle_loads(). */
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			switch (sprn) {
			case SPRN_SRR0:
				spr_val = vcpu->arch.shared->srr0;
				break;
			case SPRN_SRR1:
				spr_val = vcpu->arch.shared->srr1;
				break;
			case SPRN_PVR:
				spr_val = vcpu->arch.pvr;
				break;
			case SPRN_PIR:
				spr_val = vcpu->vcpu_id;
				break;
			case SPRN_MSSSR0:
				spr_val = 0;
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				/* Upper 32 bits of the 64-bit timebase. */
				spr_val = get_tb() >> 32;
				break;
			case SPRN_TBWU:
				spr_val = get_tb();
				break;

			case SPRN_SPRG0:
				spr_val = vcpu->arch.shared->sprg0;
				break;
			case SPRN_SPRG1:
				spr_val = vcpu->arch.shared->sprg1;
				break;
			case SPRN_SPRG2:
				spr_val = vcpu->arch.shared->sprg2;
				break;
			case SPRN_SPRG3:
				spr_val = vcpu->arch.shared->sprg3;
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
				/* Current DEC derived from the last mtDEC. */
				spr_val = kvmppc_get_dec(vcpu, get_tb());
				break;
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
								     &spr_val);
				if (unlikely(emulated == EMULATE_FAIL)) {
					printk(KERN_INFO "mfspr: unknown spr "
						"0x%x\n", sprn);
				}
				break;
			}
			/* spr_val stays 0 on failure; rt still gets written. */
			kvmppc_set_gpr(vcpu, rt, spr_val);
			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			spr_val = kvmppc_get_gpr(vcpu, rs);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.shared->srr0 = spr_val;
				break;
			case SPRN_SRR1:
				vcpu->arch.shared->srr1 = spr_val;
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				/* Re-arm the emulated decrementer timer. */
				vcpu->arch.dec = spr_val;
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.shared->sprg0 = spr_val;
				break;
			case SPRN_SPRG1:
				vcpu->arch.shared->sprg1 = spr_val;
				break;
			case SPRN_SPRG2:
				vcpu->arch.shared->sprg2 = spr_val;
				break;
			case SPRN_SPRG3:
				vcpu->arch.shared->sprg3 = spr_val;
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
								     spr_val);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr "
						"0x%x\n", sprn);
				break;
			}
			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			/* Byte-reversed load: final arg (is_bigendian) is 0. */
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			/* No-op: emulated TLB operations complete immediately. */
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		/* Sign-extending halfword load. */
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		/* Give the core-specific emulator a chance at it. */
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			/* Instruction will be retried; leave PC in place. */
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			/* Nobody could emulate it: log and inject a program
			 * interrupt into the guest. */
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
490