/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should only be called when the instruction being emulated is
 * in a branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/* Read the instruction */
	insn.word = kvm_get_inst((u32 *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* POP06 */
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:	/* POP07 */
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		break;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		break;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		break;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		break;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
		/* Compact branches not supported before R6 */
		break;
#endif
	}

	return nextpc;

unaligned:
	kvm_err("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
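
/*
 * Worked example of the scaling above, assuming the default 100 MHz
 * count_hz programmed by kvm_mips_init_count() below:
 *   count_period = NSEC_PER_SEC * 2^32 / count_hz
 *                = 10^9 * 2^32 / 10^8 ns, roughly 42.9 seconds,
 *   and a delta of 1,000,000 ns scales to
 *   1,000,000 * 10^8 / 10^9 = 100,000 CP0_Count ticks.
 */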
382 */ 383 if (running) { 384 expires = ktime_add_ns(expires, 385 vcpu->arch.count_period); 386 hrtimer_start(&vcpu->arch.comparecount_timer, expires, 387 HRTIMER_MODE_ABS); 388 } 389 } 390 391 return count; 392 } 393 394 /** 395 * kvm_mips_read_count() - Read the current count value. 396 * @vcpu: Virtual CPU. 397 * 398 * Read the current guest CP0_Count value, taking into account whether the timer 399 * is stopped. 400 * 401 * Returns: The current guest CP0_Count value. 402 */ 403 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu) 404 { 405 struct mips_coproc *cop0 = vcpu->arch.cop0; 406 407 /* If count disabled just read static copy of count */ 408 if (kvm_mips_count_disabled(vcpu)) 409 return kvm_read_c0_guest_count(cop0); 410 411 return kvm_mips_read_count_running(vcpu, ktime_get()); 412 } 413 414 /** 415 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer. 416 * @vcpu: Virtual CPU. 417 * @count: Output pointer for CP0_Count value at point of freeze. 418 * 419 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value 420 * at the point it was frozen. It is guaranteed that any pending interrupts at 421 * the point it was frozen are handled, and none after that point. 422 * 423 * This is useful where the time/CP0_Count is needed in the calculation of the 424 * new parameters. 425 * 426 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). 427 * 428 * Returns: The ktime at the point of freeze. 429 */ 430 static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count) 431 { 432 ktime_t now; 433 434 /* stop hrtimer before finding time */ 435 hrtimer_cancel(&vcpu->arch.comparecount_timer); 436 now = ktime_get(); 437 438 /* find count at this point and handle pending hrtimer */ 439 *count = kvm_mips_read_count_running(vcpu, now); 440 441 return now; 442 } 443 444 /** 445 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. 446 * @vcpu: Virtual CPU. 447 * @now: ktime at point of resume. 448 * @count: CP0_Count at point of resume. 449 * 450 * Resumes the timer and updates the timer expiry based on @now and @count. 451 * This can be used in conjunction with kvm_mips_freeze_timer() when timer 452 * parameters need to be changed. 453 * 454 * It is guaranteed that a timer interrupt immediately after resume will be 455 * handled, but not if CP_Compare is exactly at @count. That case is already 456 * handled by kvm_mips_freeze_timer(). 457 * 458 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). 459 */ 460 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, 461 ktime_t now, u32 count) 462 { 463 struct mips_coproc *cop0 = vcpu->arch.cop0; 464 u32 compare; 465 u64 delta; 466 ktime_t expire; 467 468 /* Calculate timeout (wrap 0 to 2^32) */ 469 compare = kvm_read_c0_guest_compare(cop0); 470 delta = (u64)(u32)(compare - count - 1) + 1; 471 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); 472 expire = ktime_add_ns(now, delta); 473 474 /* Update hrtimer to use new timeout */ 475 hrtimer_cancel(&vcpu->arch.comparecount_timer); 476 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); 477 } 478 479 /** 480 * kvm_mips_write_count() - Modify the count and update timer. 481 * @vcpu: Virtual CPU. 482 * @count: Guest CP0_Count value to set. 483 * 484 * Sets the CP0_Count value and updates the timer accordingly. 
485 */ 486 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count) 487 { 488 struct mips_coproc *cop0 = vcpu->arch.cop0; 489 ktime_t now; 490 491 /* Calculate bias */ 492 now = kvm_mips_count_time(vcpu); 493 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); 494 495 if (kvm_mips_count_disabled(vcpu)) 496 /* The timer's disabled, adjust the static count */ 497 kvm_write_c0_guest_count(cop0, count); 498 else 499 /* Update timeout */ 500 kvm_mips_resume_hrtimer(vcpu, now, count); 501 } 502 503 /** 504 * kvm_mips_init_count() - Initialise timer. 505 * @vcpu: Virtual CPU. 506 * 507 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set 508 * it going if it's enabled. 509 */ 510 void kvm_mips_init_count(struct kvm_vcpu *vcpu) 511 { 512 /* 100 MHz */ 513 vcpu->arch.count_hz = 100*1000*1000; 514 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, 515 vcpu->arch.count_hz); 516 vcpu->arch.count_dyn_bias = 0; 517 518 /* Starting at 0 */ 519 kvm_mips_write_count(vcpu, 0); 520 } 521 522 /** 523 * kvm_mips_set_count_hz() - Update the frequency of the timer. 524 * @vcpu: Virtual CPU. 525 * @count_hz: Frequency of CP0_Count timer in Hz. 526 * 527 * Change the frequency of the CP0_Count timer. This is done atomically so that 528 * CP0_Count is continuous and no timer interrupt is lost. 529 * 530 * Returns: -EINVAL if @count_hz is out of range. 531 * 0 on success. 532 */ 533 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) 534 { 535 struct mips_coproc *cop0 = vcpu->arch.cop0; 536 int dc; 537 ktime_t now; 538 u32 count; 539 540 /* ensure the frequency is in a sensible range... */ 541 if (count_hz <= 0 || count_hz > NSEC_PER_SEC) 542 return -EINVAL; 543 /* ... and has actually changed */ 544 if (vcpu->arch.count_hz == count_hz) 545 return 0; 546 547 /* Safely freeze timer so we can keep it continuous */ 548 dc = kvm_mips_count_disabled(vcpu); 549 if (dc) { 550 now = kvm_mips_count_time(vcpu); 551 count = kvm_read_c0_guest_count(cop0); 552 } else { 553 now = kvm_mips_freeze_hrtimer(vcpu, &count); 554 } 555 556 /* Update the frequency */ 557 vcpu->arch.count_hz = count_hz; 558 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); 559 vcpu->arch.count_dyn_bias = 0; 560 561 /* Calculate adjusted bias so dynamic count is unchanged */ 562 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); 563 564 /* Update and resume hrtimer */ 565 if (!dc) 566 kvm_mips_resume_hrtimer(vcpu, now, count); 567 return 0; 568 } 569 570 /** 571 * kvm_mips_write_compare() - Modify compare and update timer. 572 * @vcpu: Virtual CPU. 573 * @compare: New CP0_Compare value. 574 * @ack: Whether to acknowledge timer interrupt. 575 * 576 * Update CP0_Compare to a new value and update the timeout. 577 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure 578 * any pending timer interrupt is preserved. 
579 */ 580 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) 581 { 582 struct mips_coproc *cop0 = vcpu->arch.cop0; 583 int dc; 584 u32 old_compare = kvm_read_c0_guest_compare(cop0); 585 ktime_t now; 586 u32 count; 587 588 /* if unchanged, must just be an ack */ 589 if (old_compare == compare) { 590 if (!ack) 591 return; 592 kvm_mips_callbacks->dequeue_timer_int(vcpu); 593 kvm_write_c0_guest_compare(cop0, compare); 594 return; 595 } 596 597 /* freeze_hrtimer() takes care of timer interrupts <= count */ 598 dc = kvm_mips_count_disabled(vcpu); 599 if (!dc) 600 now = kvm_mips_freeze_hrtimer(vcpu, &count); 601 602 if (ack) 603 kvm_mips_callbacks->dequeue_timer_int(vcpu); 604 605 kvm_write_c0_guest_compare(cop0, compare); 606 607 /* resume_hrtimer() takes care of timer interrupts > count */ 608 if (!dc) 609 kvm_mips_resume_hrtimer(vcpu, now, count); 610 } 611 612 /** 613 * kvm_mips_count_disable() - Disable count. 614 * @vcpu: Virtual CPU. 615 * 616 * Disable the CP0_Count timer. A timer interrupt on or before the final stop 617 * time will be handled but not after. 618 * 619 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or 620 * count_ctl.DC has been set (count disabled). 621 * 622 * Returns: The time that the timer was stopped. 623 */ 624 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) 625 { 626 struct mips_coproc *cop0 = vcpu->arch.cop0; 627 u32 count; 628 ktime_t now; 629 630 /* Stop hrtimer */ 631 hrtimer_cancel(&vcpu->arch.comparecount_timer); 632 633 /* Set the static count from the dynamic count, handling pending TI */ 634 now = ktime_get(); 635 count = kvm_mips_read_count_running(vcpu, now); 636 kvm_write_c0_guest_count(cop0, count); 637 638 return now; 639 } 640 641 /** 642 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC. 643 * @vcpu: Virtual CPU. 644 * 645 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or 646 * before the final stop time will be handled if the timer isn't disabled by 647 * count_ctl.DC, but not after. 648 * 649 * Assumes CP0_Cause.DC is clear (count enabled). 650 */ 651 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) 652 { 653 struct mips_coproc *cop0 = vcpu->arch.cop0; 654 655 kvm_set_c0_guest_cause(cop0, CAUSEF_DC); 656 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) 657 kvm_mips_count_disable(vcpu); 658 } 659 660 /** 661 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC. 662 * @vcpu: Virtual CPU. 663 * 664 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after 665 * the start time will be handled if the timer isn't disabled by count_ctl.DC, 666 * potentially before even returning, so the caller should be careful with 667 * ordering of CP0_Cause modifications so as not to lose it. 668 * 669 * Assumes CP0_Cause.DC is set (count disabled). 670 */ 671 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) 672 { 673 struct mips_coproc *cop0 = vcpu->arch.cop0; 674 u32 count; 675 676 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); 677 678 /* 679 * Set the dynamic count to match the static count. 680 * This starts the hrtimer if count_ctl.DC allows it. 681 * Otherwise it conveniently updates the biases. 682 */ 683 count = kvm_read_c0_guest_count(cop0); 684 kvm_mips_write_count(vcpu, count); 685 } 686 687 /** 688 * kvm_mips_set_count_ctl() - Update the count control KVM register. 689 * @vcpu: Virtual CPU. 690 * @count_ctl: Count control register new value. 691 * 692 * Set the count control KVM register. 
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}
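
/*
 * Example of the re-enable path above, assuming the default 100 MHz count_hz:
 * if the static count is 0 and CP0_Compare is 1000 when count_ctl.DC is
 * cleared, the timeout is count_resume + 1000 * 10 ns; if that moment has
 * already passed, the timer interrupt is queued immediately before the
 * hrtimer is restarted.
 */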
780 */ 781 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) 782 { 783 /* Add the Count period to the current expiry time */ 784 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, 785 vcpu->arch.count_period); 786 return HRTIMER_RESTART; 787 } 788 789 enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) 790 { 791 struct mips_coproc *cop0 = vcpu->arch.cop0; 792 enum emulation_result er = EMULATE_DONE; 793 794 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { 795 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, 796 kvm_read_c0_guest_epc(cop0)); 797 kvm_clear_c0_guest_status(cop0, ST0_EXL); 798 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); 799 800 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { 801 kvm_clear_c0_guest_status(cop0, ST0_ERL); 802 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); 803 } else { 804 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", 805 vcpu->arch.pc); 806 er = EMULATE_FAIL; 807 } 808 809 return er; 810 } 811 812 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) 813 { 814 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, 815 vcpu->arch.pending_exceptions); 816 817 ++vcpu->stat.wait_exits; 818 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT); 819 if (!vcpu->arch.pending_exceptions) { 820 vcpu->arch.wait = 1; 821 kvm_vcpu_block(vcpu); 822 823 /* 824 * We we are runnable, then definitely go off to user space to 825 * check if any I/O interrupts are pending. 826 */ 827 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { 828 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 829 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 830 } 831 } 832 833 return EMULATE_DONE; 834 } 835 836 /* 837 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that 838 * we can catch this, if things ever change 839 */ 840 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) 841 { 842 struct mips_coproc *cop0 = vcpu->arch.cop0; 843 unsigned long pc = vcpu->arch.pc; 844 845 kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); 846 return EMULATE_FAIL; 847 } 848 849 /** 850 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. 851 * @vcpu: VCPU with changed mappings. 852 * @tlb: TLB entry being removed. 853 * 854 * This is called to indicate a single change in guest MMU mappings, so that we 855 * can arrange TLB flushes on this and other CPUs. 
856 */ 857 static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, 858 struct kvm_mips_tlb *tlb) 859 { 860 int cpu, i; 861 bool user; 862 863 /* No need to flush for entries which are already invalid */ 864 if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V)) 865 return; 866 /* User address space doesn't need flushing for KSeg2/3 changes */ 867 user = tlb->tlb_hi < KVM_GUEST_KSEG0; 868 869 preempt_disable(); 870 871 /* 872 * Probe the shadow host TLB for the entry being overwritten, if one 873 * matches, invalidate it 874 */ 875 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); 876 877 /* Invalidate the whole ASID on other CPUs */ 878 cpu = smp_processor_id(); 879 for_each_possible_cpu(i) { 880 if (i == cpu) 881 continue; 882 if (user) 883 vcpu->arch.guest_user_asid[i] = 0; 884 vcpu->arch.guest_kernel_asid[i] = 0; 885 } 886 887 preempt_enable(); 888 } 889 890 /* Write Guest TLB Entry @ Index */ 891 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) 892 { 893 struct mips_coproc *cop0 = vcpu->arch.cop0; 894 int index = kvm_read_c0_guest_index(cop0); 895 struct kvm_mips_tlb *tlb = NULL; 896 unsigned long pc = vcpu->arch.pc; 897 898 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { 899 kvm_debug("%s: illegal index: %d\n", __func__, index); 900 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", 901 pc, index, kvm_read_c0_guest_entryhi(cop0), 902 kvm_read_c0_guest_entrylo0(cop0), 903 kvm_read_c0_guest_entrylo1(cop0), 904 kvm_read_c0_guest_pagemask(cop0)); 905 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; 906 } 907 908 tlb = &vcpu->arch.guest_tlb[index]; 909 910 kvm_mips_invalidate_guest_tlb(vcpu, tlb); 911 912 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); 913 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); 914 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); 915 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); 916 917 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", 918 pc, index, kvm_read_c0_guest_entryhi(cop0), 919 kvm_read_c0_guest_entrylo0(cop0), 920 kvm_read_c0_guest_entrylo1(cop0), 921 kvm_read_c0_guest_pagemask(cop0)); 922 923 return EMULATE_DONE; 924 } 925 926 /* Write Guest TLB Entry @ Random Index */ 927 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) 928 { 929 struct mips_coproc *cop0 = vcpu->arch.cop0; 930 struct kvm_mips_tlb *tlb = NULL; 931 unsigned long pc = vcpu->arch.pc; 932 int index; 933 934 get_random_bytes(&index, sizeof(index)); 935 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); 936 937 tlb = &vcpu->arch.guest_tlb[index]; 938 939 kvm_mips_invalidate_guest_tlb(vcpu, tlb); 940 941 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); 942 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); 943 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); 944 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); 945 946 kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", 947 pc, index, kvm_read_c0_guest_entryhi(cop0), 948 kvm_read_c0_guest_entrylo0(cop0), 949 kvm_read_c0_guest_entrylo1(cop0)); 950 951 return EMULATE_DONE; 952 } 953 954 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) 955 { 956 struct mips_coproc *cop0 = vcpu->arch.cop0; 957 long entryhi = kvm_read_c0_guest_entryhi(cop0); 958 unsigned long pc = vcpu->arch.pc; 959 int index = -1; 960 961 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); 962 963 kvm_write_c0_guest_index(cop0, index); 964 965 kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", 
		  index);

	return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 and ULRI are optional */
	unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* KScrExist */
	mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;

	return mask;
}
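
/*
 * Note on the shift above: Config4.KScrExist occupies bits 23:16, so shifting
 * kscratch_enabled (a bitmask of the KScratch registers KVM exposes) left by
 * 16 marks exactly those registers as writable by userland.
 */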
1044 */ 1045 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { 1046 if (cpu_has_fre) 1047 mask |= MIPS_CONF5_FRE; 1048 /* We don't support UFR or UFE */ 1049 } 1050 1051 return mask; 1052 } 1053 1054 enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, 1055 u32 *opc, u32 cause, 1056 struct kvm_run *run, 1057 struct kvm_vcpu *vcpu) 1058 { 1059 struct mips_coproc *cop0 = vcpu->arch.cop0; 1060 enum emulation_result er = EMULATE_DONE; 1061 u32 rt, rd, sel; 1062 unsigned long curr_pc; 1063 int cpu, i; 1064 1065 /* 1066 * Update PC and hold onto current PC in case there is 1067 * an error and we want to rollback the PC 1068 */ 1069 curr_pc = vcpu->arch.pc; 1070 er = update_pc(vcpu, cause); 1071 if (er == EMULATE_FAIL) 1072 return er; 1073 1074 if (inst.co_format.co) { 1075 switch (inst.co_format.func) { 1076 case tlbr_op: /* Read indexed TLB entry */ 1077 er = kvm_mips_emul_tlbr(vcpu); 1078 break; 1079 case tlbwi_op: /* Write indexed */ 1080 er = kvm_mips_emul_tlbwi(vcpu); 1081 break; 1082 case tlbwr_op: /* Write random */ 1083 er = kvm_mips_emul_tlbwr(vcpu); 1084 break; 1085 case tlbp_op: /* TLB Probe */ 1086 er = kvm_mips_emul_tlbp(vcpu); 1087 break; 1088 case rfe_op: 1089 kvm_err("!!!COP0_RFE!!!\n"); 1090 break; 1091 case eret_op: 1092 er = kvm_mips_emul_eret(vcpu); 1093 goto dont_update_pc; 1094 case wait_op: 1095 er = kvm_mips_emul_wait(vcpu); 1096 break; 1097 } 1098 } else { 1099 rt = inst.c0r_format.rt; 1100 rd = inst.c0r_format.rd; 1101 sel = inst.c0r_format.sel; 1102 1103 switch (inst.c0r_format.rs) { 1104 case mfc_op: 1105 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 1106 cop0->stat[rd][sel]++; 1107 #endif 1108 /* Get reg */ 1109 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { 1110 vcpu->arch.gprs[rt] = 1111 (s32)kvm_mips_read_count(vcpu); 1112 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { 1113 vcpu->arch.gprs[rt] = 0x0; 1114 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1115 kvm_mips_trans_mfc0(inst, opc, vcpu); 1116 #endif 1117 } else { 1118 vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel]; 1119 1120 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1121 kvm_mips_trans_mfc0(inst, opc, vcpu); 1122 #endif 1123 } 1124 1125 trace_kvm_hwr(vcpu, KVM_TRACE_MFC0, 1126 KVM_TRACE_COP0(rd, sel), 1127 vcpu->arch.gprs[rt]); 1128 break; 1129 1130 case dmfc_op: 1131 vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; 1132 1133 trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0, 1134 KVM_TRACE_COP0(rd, sel), 1135 vcpu->arch.gprs[rt]); 1136 break; 1137 1138 case mtc_op: 1139 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 1140 cop0->stat[rd][sel]++; 1141 #endif 1142 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, 1143 KVM_TRACE_COP0(rd, sel), 1144 vcpu->arch.gprs[rt]); 1145 1146 if ((rd == MIPS_CP0_TLB_INDEX) 1147 && (vcpu->arch.gprs[rt] >= 1148 KVM_MIPS_GUEST_TLB_SIZE)) { 1149 kvm_err("Invalid TLB Index: %ld", 1150 vcpu->arch.gprs[rt]); 1151 er = EMULATE_FAIL; 1152 break; 1153 } 1154 #define C0_EBASE_CORE_MASK 0xff 1155 if ((rd == MIPS_CP0_PRID) && (sel == 1)) { 1156 /* Preserve CORE number */ 1157 kvm_change_c0_guest_ebase(cop0, 1158 ~(C0_EBASE_CORE_MASK), 1159 vcpu->arch.gprs[rt]); 1160 kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n", 1161 kvm_read_c0_guest_ebase(cop0)); 1162 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { 1163 u32 nasid = 1164 vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID; 1165 if (((kvm_read_c0_guest_entryhi(cop0) & 1166 KVM_ENTRYHI_ASID) != nasid)) { 1167 trace_kvm_asid_change(vcpu, 1168 kvm_read_c0_guest_entryhi(cop0) 1169 & KVM_ENTRYHI_ASID, 1170 nasid); 1171 1172 /* 1173 * Regenerate/invalidate kernel MMU 1174 * context. 
					 * The user MMU context will be
					 * regenerated lazily on re-entry to
					 * guest user if the guest ASID actually
					 * changes.
					 */
					preempt_disable();
					cpu = smp_processor_id();
					kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
								cpu, vcpu);
					vcpu->arch.guest_kernel_asid[cpu] =
						vcpu->arch.guest_kernel_mm.context.asid[cpu];
					for_each_possible_cpu(i)
						if (i != cpu)
							vcpu->arch.guest_kernel_asid[i] = 0;
					preempt_enable();
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;


				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					kvm_lose_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;


				/* Handle changes in FPU/MSA modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
1294 */ 1295 if (change & MIPS_CONF5_FRE && 1296 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) 1297 change_c0_config5(MIPS_CONF5_FRE, val); 1298 1299 /* 1300 * Propagate MSAEn changes immediately if the 1301 * MSA context is already loaded. When disabling 1302 * we leave the context loaded so it can be 1303 * quickly enabled again in the near future. 1304 */ 1305 if (change & MIPS_CONF5_MSAEN && 1306 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) 1307 change_c0_config5(MIPS_CONF5_MSAEN, 1308 val); 1309 1310 preempt_enable(); 1311 1312 kvm_write_c0_guest_config5(cop0, val); 1313 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { 1314 u32 old_cause, new_cause; 1315 1316 old_cause = kvm_read_c0_guest_cause(cop0); 1317 new_cause = vcpu->arch.gprs[rt]; 1318 /* Update R/W bits */ 1319 kvm_change_c0_guest_cause(cop0, 0x08800300, 1320 new_cause); 1321 /* DC bit enabling/disabling timer? */ 1322 if ((old_cause ^ new_cause) & CAUSEF_DC) { 1323 if (new_cause & CAUSEF_DC) 1324 kvm_mips_count_disable_cause(vcpu); 1325 else 1326 kvm_mips_count_enable_cause(vcpu); 1327 } 1328 } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) { 1329 u32 mask = MIPS_HWRENA_CPUNUM | 1330 MIPS_HWRENA_SYNCISTEP | 1331 MIPS_HWRENA_CC | 1332 MIPS_HWRENA_CCRES; 1333 1334 if (kvm_read_c0_guest_config3(cop0) & 1335 MIPS_CONF3_ULRI) 1336 mask |= MIPS_HWRENA_ULR; 1337 cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask; 1338 } else { 1339 cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; 1340 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1341 kvm_mips_trans_mtc0(inst, opc, vcpu); 1342 #endif 1343 } 1344 break; 1345 1346 case dmtc_op: 1347 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", 1348 vcpu->arch.pc, rt, rd, sel); 1349 trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0, 1350 KVM_TRACE_COP0(rd, sel), 1351 vcpu->arch.gprs[rt]); 1352 er = EMULATE_FAIL; 1353 break; 1354 1355 case mfmc0_op: 1356 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS 1357 cop0->stat[MIPS_CP0_STATUS][0]++; 1358 #endif 1359 if (rt != 0) 1360 vcpu->arch.gprs[rt] = 1361 kvm_read_c0_guest_status(cop0); 1362 /* EI */ 1363 if (inst.mfmc0_format.sc) { 1364 kvm_debug("[%#lx] mfmc0_op: EI\n", 1365 vcpu->arch.pc); 1366 kvm_set_c0_guest_status(cop0, ST0_IE); 1367 } else { 1368 kvm_debug("[%#lx] mfmc0_op: DI\n", 1369 vcpu->arch.pc); 1370 kvm_clear_c0_guest_status(cop0, ST0_IE); 1371 } 1372 1373 break; 1374 1375 case wrpgpr_op: 1376 { 1377 u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; 1378 u32 pss = 1379 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; 1380 /* 1381 * We don't support any shadow register sets, so 1382 * SRSCtl[PSS] == SRSCtl[CSS] = 0 1383 */ 1384 if (css || pss) { 1385 er = EMULATE_FAIL; 1386 break; 1387 } 1388 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, 1389 vcpu->arch.gprs[rt]); 1390 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; 1391 } 1392 break; 1393 default: 1394 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", 1395 vcpu->arch.pc, inst.c0r_format.rs); 1396 er = EMULATE_FAIL; 1397 break; 1398 } 1399 } 1400 1401 done: 1402 /* Rollback PC only if emulation was unsuccessful */ 1403 if (er == EMULATE_FAIL) 1404 vcpu->arch.pc = curr_pc; 1405 1406 dont_update_pc: 1407 /* 1408 * This is for special instructions whose emulation 1409 * updates the PC, so do not overwrite the PC under 1410 * any circumstances 1411 */ 1412 1413 return er; 1414 } 1415 1416 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, 1417 u32 cause, 1418 struct kvm_run *run, 1419 struct kvm_vcpu *vcpu) 1420 { 1421 enum emulation_result er = EMULATE_DO_MMIO; 1422 u32 rt; 1423 u32 bytes; 1424 void *data = 
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = inst.i_format.rt;

	switch (inst.i_format.opcode) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(u8 *) data);

		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u32 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u16 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;

	default:
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	u32 op, rt;
	u32 bytes;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
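
/*
 * Note on mmio_needed above: for the sub-word loads a value of 2 tells the
 * MMIO completion path (kvm_mips_complete_mmio_load()) to sign-extend the
 * data (lb/lh), while 1 requests zero extension (lbu/lhu).
 */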

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc, u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == Index_Writeback_Inv) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == Cache_D)
			r4k_blast_dcache();
		else if (cache == Cache_I)
			r4k_blast_icache();
		else {
			kvm_err("%s: unsupported CACHE INDEX operation\n",
				__func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
		    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
			kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
				__func__, va, vcpu, read_c0_entryhi());
			er = EMULATE_FAIL;
			preempt_enable();
			goto done;
		}
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
			goto skip_fault;

		/*
		 * If address not in the guest TLB, then give the guest a fault,
		 * the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & KVM_ENTRYHI_ASID));

		if (index < 0) {
			vcpu->arch.host_cp0_badvaddr = va;
			vcpu->arch.pc = curr_pc;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				vcpu->arch.host_cp0_badvaddr = va;
				vcpu->arch.pc = curr_pc;
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			}
			/*
			 * We fault an entry from the guest tlb to the
			 * shadow host TLB
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, va, index, vcpu,
					read_c0_entryhi());
				er = EMULATE_FAIL;
				preempt_enable();
				goto done;
			}
		}
	} else {
		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto done;

	}

skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op_inst == Hit_Invalidate_I) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
	}

	preempt_enable();
done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for exceptions whose emulation updates the PC, so do not
	 * overwrite the PC under any circumstances
	 */

	return er;
}

enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst.word = kvm_get_inst(opc, vcpu);

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;
#else
	case spec3_op:
		switch (inst.spec3_format.func) {
		case cache6_op:
			++vcpu->stat.cache_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_mips_emulate_cache(inst, opc, cause, run,
						    vcpu);
			break;
		default:
			goto unknown;
		};
		break;
unknown:
#endif

	default:
		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
			inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_syscall(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_SYS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
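
/*
 * Note on the vectors used by the injection helpers below: when the guest is
 * not already handling an exception (Status.EXL clear), a TLB refill is
 * delivered to the refill vector at KVM_GUEST_KSEG0 + 0x0, while all other
 * cases (including nested faults with EXL set) use the general exception
 * vector at KVM_GUEST_KSEG0 + 0x180, mirroring the architectural vector
 * layout relative to CP0_EBase.
 */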

enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
						 u32 *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
						 u32 *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}

/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
	int index;

	/* If address not in the guest TLB, then we are in trouble */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}
#endif

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
	return er;
}

enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}

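/**
 * kvm_mips_emulate_fpu_exc() - Deliver a Coprocessor Unusable (FPU) exception
 *				to the guest.
 * @cause:	CP0 Cause register value of the exception being emulated.
 * @opc:	Pointer to the faulting instruction (unused here).
 * @run:	kvm_run structure (unused here).
 * @vcpu:	Virtual CPU to deliver the exception to.
 *
 * Save the old PC in guest CP0_EPC and set Status.EXL (unless the guest is
 * already in exception mode), then set Cause.ExcCode to CpU with CE = 1 and
 * vector the guest to the general exception entry point.
 *
 * Returns:	EMULATE_DONE.
 */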
enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_CPU << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_RI << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_BP << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						u32 *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); 2272 2273 kvm_change_c0_guest_cause(cop0, (0xff), 2274 (EXCCODE_TR << CAUSEB_EXCCODE)); 2275 2276 /* Set PC to the exception entry point */ 2277 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2278 2279 } else { 2280 kvm_err("Trying to deliver TRAP when EXL is already set\n"); 2281 er = EMULATE_FAIL; 2282 } 2283 2284 return er; 2285 } 2286 2287 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, 2288 u32 *opc, 2289 struct kvm_run *run, 2290 struct kvm_vcpu *vcpu) 2291 { 2292 struct mips_coproc *cop0 = vcpu->arch.cop0; 2293 struct kvm_vcpu_arch *arch = &vcpu->arch; 2294 enum emulation_result er = EMULATE_DONE; 2295 2296 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2297 /* save old pc */ 2298 kvm_write_c0_guest_epc(cop0, arch->pc); 2299 kvm_set_c0_guest_status(cop0, ST0_EXL); 2300 2301 if (cause & CAUSEF_BD) 2302 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2303 else 2304 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2305 2306 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); 2307 2308 kvm_change_c0_guest_cause(cop0, (0xff), 2309 (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); 2310 2311 /* Set PC to the exception entry point */ 2312 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2313 2314 } else { 2315 kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); 2316 er = EMULATE_FAIL; 2317 } 2318 2319 return er; 2320 } 2321 2322 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, 2323 u32 *opc, 2324 struct kvm_run *run, 2325 struct kvm_vcpu *vcpu) 2326 { 2327 struct mips_coproc *cop0 = vcpu->arch.cop0; 2328 struct kvm_vcpu_arch *arch = &vcpu->arch; 2329 enum emulation_result er = EMULATE_DONE; 2330 2331 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2332 /* save old pc */ 2333 kvm_write_c0_guest_epc(cop0, arch->pc); 2334 kvm_set_c0_guest_status(cop0, ST0_EXL); 2335 2336 if (cause & CAUSEF_BD) 2337 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2338 else 2339 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2340 2341 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); 2342 2343 kvm_change_c0_guest_cause(cop0, (0xff), 2344 (EXCCODE_FPE << CAUSEB_EXCCODE)); 2345 2346 /* Set PC to the exception entry point */ 2347 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2348 2349 } else { 2350 kvm_err("Trying to deliver FPE when EXL is already set\n"); 2351 er = EMULATE_FAIL; 2352 } 2353 2354 return er; 2355 } 2356 2357 enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, 2358 u32 *opc, 2359 struct kvm_run *run, 2360 struct kvm_vcpu *vcpu) 2361 { 2362 struct mips_coproc *cop0 = vcpu->arch.cop0; 2363 struct kvm_vcpu_arch *arch = &vcpu->arch; 2364 enum emulation_result er = EMULATE_DONE; 2365 2366 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2367 /* save old pc */ 2368 kvm_write_c0_guest_epc(cop0, arch->pc); 2369 kvm_set_c0_guest_status(cop0, ST0_EXL); 2370 2371 if (cause & CAUSEF_BD) 2372 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2373 else 2374 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2375 2376 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); 2377 2378 kvm_change_c0_guest_cause(cop0, (0xff), 2379 (EXCCODE_MSADIS << CAUSEB_EXCCODE)); 2380 2381 /* Set PC to the exception entry point */ 2382 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2383 2384 } else { 2385 kvm_err("Trying to deliver MSADIS when EXL is already set\n"); 2386 er = EMULATE_FAIL; 2387 } 2388 2389 return er; 2390 } 2391 2392 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, 2393 struct kvm_run *run, 2394 struct kvm_vcpu *vcpu) 2395 { 2396 struct mips_coproc *cop0 = vcpu->arch.cop0; 2397 
enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
					 struct kvm_run *run,
					 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	union mips_instruction inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst.word = kvm_get_inst(opc, vcpu);

	if (inst.word == KVM_INVALID_INST) {
		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if (inst.r_format.opcode == spec3_op &&
	    inst.r_format.func == rdhwr_op &&
	    inst.r_format.rs == 0 &&
	    (inst.r_format.re >> 3) == 0) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = inst.r_format.rd;
		int rt = inst.r_format.rt;
		int sel = inst.r_format.re & 0x7;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case MIPS_HWR_CPUNUM:		/* CPU number */
			arch->gprs[rt] = vcpu->vcpu_id;
			break;
		case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case MIPS_HWR_CC:		/* Read count register */
			arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
			break;
		case MIPS_HWR_CCRES:		/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case MIPS_HWR_ULR:		/* Read UserLocal register */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;

		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}

		trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
			      vcpu->arch.gprs[rt]);
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n",
			  opc, inst.word);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}

enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
						  struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(s32 *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *) run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;

		break;
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
			  vcpu->mmio_needed);

done:
	return er;
}

static enum emulation_result kvm_mips_emulate_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case EXCCODE_INT:
		case EXCCODE_SYS:
		case EXCCODE_BP:
		case EXCCODE_RI:
		case EXCCODE_TR:
		case EXCCODE_MSAFPE:
		case EXCCODE_FPE:
		case EXCCODE_MSADIS:
			break;

		case EXCCODE_CPU:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case EXCCODE_MOD:
			break;

		case EXCCODE_TLBL:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest.
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_TLBS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest.
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_ADES:
			kvm_debug("%s: address error ST @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case EXCCODE_ADEL:
			kvm_debug("%s: address error LD @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL)
		kvm_mips_emulate_exc(cause, opc, run, vcpu);

	return er;
}

/*
 * User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
	 * send the guest an exception. The guest exc handler should then inject
	 * an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
		      (va & VPN2_MASK) |
		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
		       KVM_ENTRYHI_ASID));
	if (index < 0) {
		if (exccode == EXCCODE_TLBL) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == EXCCODE_TLBS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			kvm_err("%s: invalid exc code: %d\n", __func__,
				exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == EXCCODE_TLBL) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == EXCCODE_TLBS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				kvm_err("%s: invalid exc code: %d\n", __func__,
					exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
			/*
			 * OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, va, index, vcpu,
					read_c0_entryhi());
				er = EMULATE_FAIL;
			}
		}
	}

	return er;
}