/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_STH  44
#define OP_STHU 45

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.tcr & TCR_DIE) {
		/* The decrementer ticks at the same rate as the timebase, so
		 * that's how we convert the guest DEC value to the number of
		 * host ticks. */
		unsigned long nr_jiffies;

		nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
		mod_timer(&vcpu->arch.dec_timer,
			  get_jiffies_64() + nr_jiffies);
	} else {
		del_timer(&vcpu->arch.dec_timer);
	}
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
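
/*
 * The decode helpers used below come from asm/disassemble.h and extract
 * fixed fields from the 32-bit instruction word. In Power ISA bit numbering
 * the primary opcode occupies bits 0:5 and, for opcode 31, the extended
 * opcode occupies bits 21:30; as shifts on the raw word the helpers are
 * roughly:
 *
 *   get_op(inst)         inst >> 26            primary opcode
 *   get_xop(inst)        (inst >> 1) & 0x3ff   extended opcode (op 31)
 *   get_rt/get_rs(inst)  (inst >> 21) & 0x1f   target/source GPR
 *   get_ra(inst)         (inst >> 16) & 0x1f   base GPR (0 means no base)
 *   get_rb(inst)         (inst >> 11) & 0x1f   index GPR
 *   get_sprn(inst)       the SPR number, reassembled from its two swapped
 *                        5-bit halves
 *
 * kvmppc_emulate_instruction() returns EMULATE_DONE, EMULATE_FAIL, or
 * whatever the MMIO load/store handlers returned (typically EMULATE_DO_MMIO
 * when the access must be completed in userspace).
 */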
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = vcpu->arch.last_inst;
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* This default type might be overwritten by subcategories. */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	switch (get_op(inst)) {
	case OP_TRAP:
		vcpu->arch.esr |= ESR_PTR;
		kvmppc_core_queue_program(vcpu);
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			/* X-form EA: (rA|0) + rB. rA == 0 means a literal
			 * zero, not GPR0. */
			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       1, 1);
			/* Update form: write the EA back into rA. */
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;
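
		/* mfspr/mtspr: the SPRs that all supported cores have in
		 * common are emulated here; everything else is deferred to
		 * the per-core backend via kvmppc_core_emulate_mfspr() and
		 * kvmppc_core_emulate_mtspr(). */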
		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
			case SPRN_SRR1:
				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
			case SPRN_PVR:
				vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break;
			case SPRN_PIR:
				vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyway.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				vcpu->arch.gpr[rt] = mftbl(); break;
			case SPRN_TBWU:
				vcpu->arch.gpr[rt] = mftbu(); break;

			case SPRN_SPRG0:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
			case SPRN_SPRG1:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
			case SPRN_SPRG2:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
			case SPRN_SPRG3:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk(KERN_INFO "mfspr: unknown spr %x\n", sprn);
					vcpu->arch.gpr[rt] = 0;
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_DEC:
				vcpu->arch.dec = vcpu->arch.gpr[rs];
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			/* is_bigendian == 0: byte-reversed access. */
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;
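
	/* D-form loads and stores. The update ("u") forms must also write
	 * the address of the access back into rA; note that the cases below
	 * write back vcpu->arch.paddr_accessed, which matches the architected
	 * effective address only while the guest mapping is an identity
	 * mapping (XXX). */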
	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	/* Anything still flagged EMULATE_FAIL gets one more try in the
	 * core-specific backend before we report failure. */
	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);

	if (advance)
		vcpu->arch.pc += 4; /* Advance past emulated instruction. */

	return emulated;
}