// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec;
        unsigned long long dec_time;

        pr_debug("mtDEC: %lx\n", vcpu->arch.dec);
        hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);
#endif

#ifdef CONFIG_BOOKE
        /* On BOOKE, DEC = 0 is as good as decrementer not enabled */
        if (vcpu->arch.dec == 0)
                return;
#endif

        /*
         * The decrementer ticks at the same rate as the timebase, so
         * that's how we convert the guest DEC value to the number of
         * host ticks.
         */

        dec_time = vcpu->arch.dec;
        /*
         * Guest timebase ticks at the same frequency as host timebase.
         * So use the host timebase calculations for decrementer emulation.
         */
        dec_time = tb_to_ns(dec_time);
        dec_nsec = do_div(dec_time, NSEC_PER_SEC);
        hrtimer_start(&vcpu->arch.dec_timer,
                      ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
        vcpu->arch.dec_jiffies = get_tb();
}
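/*
 * Worked example (editorial illustration, not from the original source;
 * the 512 MHz timebase is an assumption chosen only to make the
 * arithmetic concrete): a guest mtdec of 0x40000000 is 1,073,741,824
 * timebase ticks, which tb_to_ns() converts to 2,097,152,000 ns;
 * do_div() then splits that into 2 s plus 97,152,000 ns for ktime_set(),
 * so the hrtimer above fires roughly two seconds after the write.
 */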
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
        u64 jd = tb - vcpu->arch.dec_jiffies;

#ifdef CONFIG_BOOKE
        if (vcpu->arch.dec < jd)
                return 0;
#endif

        return vcpu->arch.dec - jd;
}

static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
        enum emulation_result emulated = EMULATE_DONE;
        ulong spr_val = kvmppc_get_gpr(vcpu, rs);

        switch (sprn) {
        case SPRN_SRR0:
                kvmppc_set_srr0(vcpu, spr_val);
                break;
        case SPRN_SRR1:
                kvmppc_set_srr1(vcpu, spr_val);
                break;

        /* XXX We need to context-switch the timebase for
         * watchdog and FIT. */
        case SPRN_TBWL: break;
        case SPRN_TBWU: break;

        case SPRN_DEC:
                vcpu->arch.dec = (u32) spr_val;
                kvmppc_emulate_dec(vcpu);
                break;

        case SPRN_SPRG0:
                kvmppc_set_sprg0(vcpu, spr_val);
                break;
        case SPRN_SPRG1:
                kvmppc_set_sprg1(vcpu, spr_val);
                break;
        case SPRN_SPRG2:
                kvmppc_set_sprg2(vcpu, spr_val);
                break;
        case SPRN_SPRG3:
                kvmppc_set_sprg3(vcpu, spr_val);
                break;

        /* PIR can legally be written, but we ignore it */
        case SPRN_PIR: break;

        default:
                emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
                                                                  spr_val);
                if (emulated == EMULATE_FAIL)
                        printk(KERN_INFO "mtspr: unknown spr "
                                "0x%x\n", sprn);
                break;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

        return emulated;
}

static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
        enum emulation_result emulated = EMULATE_DONE;
        ulong spr_val = 0;

        switch (sprn) {
        case SPRN_SRR0:
                spr_val = kvmppc_get_srr0(vcpu);
                break;
        case SPRN_SRR1:
                spr_val = kvmppc_get_srr1(vcpu);
                break;
        case SPRN_PVR:
                spr_val = vcpu->arch.pvr;
                break;
        case SPRN_PIR:
                spr_val = vcpu->vcpu_id;
                break;

        /* Note: mftb and TBRL/TBWL are user-accessible, so
         * the guest can always access the real TB anyways.
         * In fact, we probably will never see these traps. */
        case SPRN_TBWL:
                spr_val = get_tb() >> 32;
                break;
        case SPRN_TBWU:
                spr_val = get_tb();
                break;

        case SPRN_SPRG0:
                spr_val = kvmppc_get_sprg0(vcpu);
                break;
        case SPRN_SPRG1:
                spr_val = kvmppc_get_sprg1(vcpu);
                break;
        case SPRN_SPRG2:
                spr_val = kvmppc_get_sprg2(vcpu);
                break;
        case SPRN_SPRG3:
                spr_val = kvmppc_get_sprg3(vcpu);
                break;
        /* Note: SPRG4-7 are user-readable, so we don't get
         * a trap. */

        case SPRN_DEC:
                spr_val = kvmppc_get_dec(vcpu, get_tb());
                break;
        default:
                emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
                                                                  &spr_val);
                if (unlikely(emulated == EMULATE_FAIL)) {
                        printk(KERN_INFO "mfspr: unknown spr "
                                "0x%x\n", sprn);
                }
                break;
        }

        if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, rt, spr_val);
        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);

        return emulated;
}
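/*
 * Minimal sketch (hypothetical, not part of this file) of the
 * kvm_ops->emulate_mtspr() fallback used in the default branches above:
 * a core-specific backend claims the SPRs it knows and returns
 * EMULATE_FAIL for the rest so the generic code can log the access.
 *
 *      static int foo_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
 *                                   ulong spr_val)
 *      {
 *              switch (sprn) {
 *              case SPRN_FOO:          // hypothetical core-specific SPR
 *                      vcpu->arch.foo = spr_val;
 *                      return EMULATE_DONE;
 *              default:
 *                      return EMULATE_FAIL;
 *              }
 *      }
 */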
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
{
        u32 inst;
        ppc_inst_t pinst;
        int rs, rt, sprn;
        enum emulation_result emulated;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
        inst = ppc_inst_val(pinst);
        if (emulated != EMULATE_DONE)
                return emulated;

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

        rs = get_rs(inst);
        rt = get_rt(inst);
        sprn = get_sprn(inst);

        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                kvmppc_core_queue_program(vcpu,
                                          vcpu->arch.shared->esr | ESR_PTR);
#endif
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

                case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
                case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                        kvmppc_core_queue_program(vcpu,
                                        vcpu->arch.shared->esr | ESR_PTR);
#endif
                        advance = 0;
                        break;

                case OP_31_XOP_MFSPR:
                        emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
                        if (emulated == EMULATE_AGAIN) {
                                emulated = EMULATE_DONE;
                                advance = 0;
                        }
                        break;

                case OP_31_XOP_MTSPR:
                        emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
                        if (emulated == EMULATE_AGAIN) {
                                emulated = EMULATE_DONE;
                                advance = 0;
                        }
                        break;

                case OP_31_XOP_TLBSYNC:
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

        case 0:
                /*
                 * Instruction with primary opcode 0. Based on PowerISA
                 * these are illegal instructions.
                 */
                if (inst == KVMPPC_INST_SW_BREAKPOINT) {
                        vcpu->run->exit_reason = KVM_EXIT_DEBUG;
                        vcpu->run->debug.arch.status = 0;
                        vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
                        emulated = EMULATE_EXIT_USER;
                        advance = 0;
                } else
                        emulated = EMULATE_FAIL;

                break;

        default:
                emulated = EMULATE_FAIL;
        }

        if (emulated == EMULATE_FAIL) {
                emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst,
                                                               &advance);
                if (emulated == EMULATE_AGAIN) {
                        advance = 0;
                } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                }
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        /*
         * If this ever handles prefixed instructions, the 4
         * will need to become ppc_inst_len(pinst) instead.
         */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);
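/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * exit handlers typically resume the guest on EMULATE_DONE, return to
 * the host on EMULATE_EXIT_USER, and treat EMULATE_FAIL as a reason to
 * report the failure, e.g.:
 *
 *      switch (kvmppc_emulate_instruction(vcpu)) {
 *      case EMULATE_DONE:
 *              r = RESUME_GUEST;
 *              break;
 *      case EMULATE_EXIT_USER:
 *              r = RESUME_HOST;
 *              break;
 *      case EMULATE_FAIL:
 *      default:
 *              // hypothetical error path
 *              r = RESUME_HOST;
 *              break;
 *      }
 */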