1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * KVM/MIPS: Instruction/Exception emulation 7 * 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 9 * Authors: Sanjay Lal <sanjayl@kymasys.com> 10 */ 11 12 #include <linux/errno.h> 13 #include <linux/err.h> 14 #include <linux/ktime.h> 15 #include <linux/kvm_host.h> 16 #include <linux/vmalloc.h> 17 #include <linux/fs.h> 18 #include <linux/memblock.h> 19 #include <linux/random.h> 20 #include <asm/page.h> 21 #include <asm/cacheflush.h> 22 #include <asm/cacheops.h> 23 #include <asm/cpu-info.h> 24 #include <asm/mmu_context.h> 25 #include <asm/tlbflush.h> 26 #include <asm/inst.h> 27 28 #undef CONFIG_MIPS_MT 29 #include <asm/r4kcache.h> 30 #define CONFIG_MIPS_MT 31 32 #include "interrupt.h" 33 #include "commpage.h" 34 35 #include "trace.h" 36 37 /* 38 * Compute the return address and do emulate branch simulation, if required. 39 * This function should be called only in branch delay slot active. 40 */ 41 static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc, 42 unsigned long *out) 43 { 44 unsigned int dspcontrol; 45 union mips_instruction insn; 46 struct kvm_vcpu_arch *arch = &vcpu->arch; 47 long epc = instpc; 48 long nextpc; 49 int err; 50 51 if (epc & 3) { 52 kvm_err("%s: unaligned epc\n", __func__); 53 return -EINVAL; 54 } 55 56 /* Read the instruction */ 57 err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word); 58 if (err) 59 return err; 60 61 switch (insn.i_format.opcode) { 62 /* jr and jalr are in r_format format. */ 63 case spec_op: 64 switch (insn.r_format.func) { 65 case jalr_op: 66 arch->gprs[insn.r_format.rd] = epc + 8; 67 fallthrough; 68 case jr_op: 69 nextpc = arch->gprs[insn.r_format.rs]; 70 break; 71 default: 72 return -EINVAL; 73 } 74 break; 75 76 /* 77 * This group contains: 78 * bltz_op, bgez_op, bltzl_op, bgezl_op, 79 * bltzal_op, bgezal_op, bltzall_op, bgezall_op. 80 */ 81 case bcond_op: 82 switch (insn.i_format.rt) { 83 case bltz_op: 84 case bltzl_op: 85 if ((long)arch->gprs[insn.i_format.rs] < 0) 86 epc = epc + 4 + (insn.i_format.simmediate << 2); 87 else 88 epc += 8; 89 nextpc = epc; 90 break; 91 92 case bgez_op: 93 case bgezl_op: 94 if ((long)arch->gprs[insn.i_format.rs] >= 0) 95 epc = epc + 4 + (insn.i_format.simmediate << 2); 96 else 97 epc += 8; 98 nextpc = epc; 99 break; 100 101 case bltzal_op: 102 case bltzall_op: 103 arch->gprs[31] = epc + 8; 104 if ((long)arch->gprs[insn.i_format.rs] < 0) 105 epc = epc + 4 + (insn.i_format.simmediate << 2); 106 else 107 epc += 8; 108 nextpc = epc; 109 break; 110 111 case bgezal_op: 112 case bgezall_op: 113 arch->gprs[31] = epc + 8; 114 if ((long)arch->gprs[insn.i_format.rs] >= 0) 115 epc = epc + 4 + (insn.i_format.simmediate << 2); 116 else 117 epc += 8; 118 nextpc = epc; 119 break; 120 case bposge32_op: 121 if (!cpu_has_dsp) { 122 kvm_err("%s: DSP branch but not DSP ASE\n", 123 __func__); 124 return -EINVAL; 125 } 126 127 dspcontrol = rddsp(0x01); 128 129 if (dspcontrol >= 32) 130 epc = epc + 4 + (insn.i_format.simmediate << 2); 131 else 132 epc += 8; 133 nextpc = epc; 134 break; 135 default: 136 return -EINVAL; 137 } 138 break; 139 140 /* These are unconditional and in j_format. 
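	 * The target address is formed from bits 31:28 of the delay-slot
	 * address (epc + 4) and the instruction's 26-bit target field shifted
	 * left by two, which is what the code below computes.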
*/ 141 case jal_op: 142 arch->gprs[31] = instpc + 8; 143 fallthrough; 144 case j_op: 145 epc += 4; 146 epc >>= 28; 147 epc <<= 28; 148 epc |= (insn.j_format.target << 2); 149 nextpc = epc; 150 break; 151 152 /* These are conditional and in i_format. */ 153 case beq_op: 154 case beql_op: 155 if (arch->gprs[insn.i_format.rs] == 156 arch->gprs[insn.i_format.rt]) 157 epc = epc + 4 + (insn.i_format.simmediate << 2); 158 else 159 epc += 8; 160 nextpc = epc; 161 break; 162 163 case bne_op: 164 case bnel_op: 165 if (arch->gprs[insn.i_format.rs] != 166 arch->gprs[insn.i_format.rt]) 167 epc = epc + 4 + (insn.i_format.simmediate << 2); 168 else 169 epc += 8; 170 nextpc = epc; 171 break; 172 173 case blez_op: /* POP06 */ 174 #ifndef CONFIG_CPU_MIPSR6 175 case blezl_op: /* removed in R6 */ 176 #endif 177 if (insn.i_format.rt != 0) 178 goto compact_branch; 179 if ((long)arch->gprs[insn.i_format.rs] <= 0) 180 epc = epc + 4 + (insn.i_format.simmediate << 2); 181 else 182 epc += 8; 183 nextpc = epc; 184 break; 185 186 case bgtz_op: /* POP07 */ 187 #ifndef CONFIG_CPU_MIPSR6 188 case bgtzl_op: /* removed in R6 */ 189 #endif 190 if (insn.i_format.rt != 0) 191 goto compact_branch; 192 if ((long)arch->gprs[insn.i_format.rs] > 0) 193 epc = epc + 4 + (insn.i_format.simmediate << 2); 194 else 195 epc += 8; 196 nextpc = epc; 197 break; 198 199 /* And now the FPA/cp1 branch instructions. */ 200 case cop1_op: 201 kvm_err("%s: unsupported cop1_op\n", __func__); 202 return -EINVAL; 203 204 #ifdef CONFIG_CPU_MIPSR6 205 /* R6 added the following compact branches with forbidden slots */ 206 case blezl_op: /* POP26 */ 207 case bgtzl_op: /* POP27 */ 208 /* only rt == 0 isn't compact branch */ 209 if (insn.i_format.rt != 0) 210 goto compact_branch; 211 return -EINVAL; 212 case pop10_op: 213 case pop30_op: 214 /* only rs == rt == 0 is reserved, rest are compact branches */ 215 if (insn.i_format.rs != 0 || insn.i_format.rt != 0) 216 goto compact_branch; 217 return -EINVAL; 218 case pop66_op: 219 case pop76_op: 220 /* only rs == 0 isn't compact branch */ 221 if (insn.i_format.rs != 0) 222 goto compact_branch; 223 return -EINVAL; 224 compact_branch: 225 /* 226 * If we've hit an exception on the forbidden slot, then 227 * the branch must not have been taken. 228 */ 229 epc += 8; 230 nextpc = epc; 231 break; 232 #else 233 compact_branch: 234 /* Fall through - Compact branches not supported before R6 */ 235 #endif 236 default: 237 return -EINVAL; 238 } 239 240 *out = nextpc; 241 return 0; 242 } 243 244 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause) 245 { 246 int err; 247 248 if (cause & CAUSEF_BD) { 249 err = kvm_compute_return_epc(vcpu, vcpu->arch.pc, 250 &vcpu->arch.pc); 251 if (err) 252 return EMULATE_FAIL; 253 } else { 254 vcpu->arch.pc += 4; 255 } 256 257 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); 258 259 return EMULATE_DONE; 260 } 261 262 /** 263 * kvm_get_badinstr() - Get bad instruction encoding. 264 * @opc: Guest pointer to faulting instruction. 265 * @vcpu: KVM VCPU information. 266 * 267 * Gets the instruction encoding of the faulting instruction, using the saved 268 * BadInstr register value if it exists, otherwise falling back to reading guest 269 * memory at @opc. 270 * 271 * Returns: The instruction encoding of the faulting instruction. 
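 *	    (The encoding is written to @out; the function itself returns 0 on
 *	    success or a negative error code if the instruction could not be
 *	    fetched.)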
272 */ 273 int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) 274 { 275 if (cpu_has_badinstr) { 276 *out = vcpu->arch.host_cp0_badinstr; 277 return 0; 278 } else { 279 return kvm_get_inst(opc, vcpu, out); 280 } 281 } 282 283 /** 284 * kvm_get_badinstrp() - Get bad prior instruction encoding. 285 * @opc: Guest pointer to prior faulting instruction. 286 * @vcpu: KVM VCPU information. 287 * 288 * Gets the instruction encoding of the prior faulting instruction (the branch 289 * containing the delay slot which faulted), using the saved BadInstrP register 290 * value if it exists, otherwise falling back to reading guest memory at @opc. 291 * 292 * Returns: The instruction encoding of the prior faulting instruction. 293 */ 294 int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) 295 { 296 if (cpu_has_badinstrp) { 297 *out = vcpu->arch.host_cp0_badinstrp; 298 return 0; 299 } else { 300 return kvm_get_inst(opc, vcpu, out); 301 } 302 } 303 304 /** 305 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled. 306 * @vcpu: Virtual CPU. 307 * 308 * Returns: 1 if the CP0_Count timer is disabled by either the guest 309 * CP0_Cause.DC bit or the count_ctl.DC bit. 310 * 0 otherwise (in which case CP0_Count timer is running). 311 */ 312 int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) 313 { 314 struct mips_coproc *cop0 = vcpu->arch.cop0; 315 316 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || 317 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); 318 } 319 320 /** 321 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count. 322 * 323 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias. 324 * 325 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). 326 */ 327 static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now) 328 { 329 s64 now_ns, periods; 330 u64 delta; 331 332 now_ns = ktime_to_ns(now); 333 delta = now_ns + vcpu->arch.count_dyn_bias; 334 335 if (delta >= vcpu->arch.count_period) { 336 /* If delta is out of safe range the bias needs adjusting */ 337 periods = div64_s64(now_ns, vcpu->arch.count_period); 338 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; 339 /* Recalculate delta with new bias */ 340 delta = now_ns + vcpu->arch.count_dyn_bias; 341 } 342 343 /* 344 * We've ensured that: 345 * delta < count_period 346 * 347 * Therefore the intermediate delta*count_hz will never overflow since 348 * at the boundary condition: 349 * delta = count_period 350 * delta = NSEC_PER_SEC * 2^32 / count_hz 351 * delta * count_hz = NSEC_PER_SEC * 2^32 352 */ 353 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC); 354 } 355 356 /** 357 * kvm_mips_count_time() - Get effective current time. 358 * @vcpu: Virtual CPU. 359 * 360 * Get effective monotonic ktime. This is usually a straightforward ktime_get(), 361 * except when the master disable bit is set in count_ctl, in which case it is 362 * count_resume, i.e. the time that the count was disabled. 363 * 364 * Returns: Effective monotonic ktime for CP0_Count. 365 */ 366 static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) 367 { 368 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) 369 return vcpu->arch.count_resume; 370 371 return ktime_get(); 372 } 373 374 /** 375 * kvm_mips_read_count_running() - Read the current count value as if running. 376 * @vcpu: Virtual CPU. 377 * @now: Kernel time to read CP0_Count at. 
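 *	(Typically ktime_get(), or the time captured by
 *	kvm_mips_freeze_hrtimer() after it stops the hrtimer.)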
378 * 379 * Returns the current guest CP0_Count register at time @now and handles if the 380 * timer interrupt is pending and hasn't been handled yet. 381 * 382 * Returns: The current value of the guest CP0_Count register. 383 */ 384 static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) 385 { 386 struct mips_coproc *cop0 = vcpu->arch.cop0; 387 ktime_t expires, threshold; 388 u32 count, compare; 389 int running; 390 391 /* Calculate the biased and scaled guest CP0_Count */ 392 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); 393 compare = kvm_read_c0_guest_compare(cop0); 394 395 /* 396 * Find whether CP0_Count has reached the closest timer interrupt. If 397 * not, we shouldn't inject it. 398 */ 399 if ((s32)(count - compare) < 0) 400 return count; 401 402 /* 403 * The CP0_Count we're going to return has already reached the closest 404 * timer interrupt. Quickly check if it really is a new interrupt by 405 * looking at whether the interval until the hrtimer expiry time is 406 * less than 1/4 of the timer period. 407 */ 408 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); 409 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4); 410 if (ktime_before(expires, threshold)) { 411 /* 412 * Cancel it while we handle it so there's no chance of 413 * interference with the timeout handler. 414 */ 415 running = hrtimer_cancel(&vcpu->arch.comparecount_timer); 416 417 /* Nothing should be waiting on the timeout */ 418 kvm_mips_callbacks->queue_timer_int(vcpu); 419 420 /* 421 * Restart the timer if it was running based on the expiry time 422 * we read, so that we don't push it back 2 periods. 423 */ 424 if (running) { 425 expires = ktime_add_ns(expires, 426 vcpu->arch.count_period); 427 hrtimer_start(&vcpu->arch.comparecount_timer, expires, 428 HRTIMER_MODE_ABS); 429 } 430 } 431 432 return count; 433 } 434 435 /** 436 * kvm_mips_read_count() - Read the current count value. 437 * @vcpu: Virtual CPU. 438 * 439 * Read the current guest CP0_Count value, taking into account whether the timer 440 * is stopped. 441 * 442 * Returns: The current guest CP0_Count value. 443 */ 444 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu) 445 { 446 struct mips_coproc *cop0 = vcpu->arch.cop0; 447 448 /* If count disabled just read static copy of count */ 449 if (kvm_mips_count_disabled(vcpu)) 450 return kvm_read_c0_guest_count(cop0); 451 452 return kvm_mips_read_count_running(vcpu, ktime_get()); 453 } 454 455 /** 456 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer. 457 * @vcpu: Virtual CPU. 458 * @count: Output pointer for CP0_Count value at point of freeze. 459 * 460 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value 461 * at the point it was frozen. It is guaranteed that any pending interrupts at 462 * the point it was frozen are handled, and none after that point. 463 * 464 * This is useful where the time/CP0_Count is needed in the calculation of the 465 * new parameters. 466 * 467 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). 468 * 469 * Returns: The ktime at the point of freeze. 
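 *
 * Typically paired with kvm_mips_resume_hrtimer() or
 * kvm_mips_restore_hrtimer() once the new timer parameters are in place.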
 */
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(u32)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

/**
 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
 * @vcpu:	Virtual CPU.
 * @before:	Time before Count was saved, lower bound of drift calculation.
 * @count:	CP0_Count at point of restore.
 * @min_drift:	Minimum amount of drift permitted before correction.
 *		Must be <= 0.
 *
 * Restores the timer from a particular @count, accounting for drift. This can
 * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer
 * is to be used for a period of time, but the exact ktime corresponding to the
 * final Count that must be restored is not known.
 *
 * It is guaranteed that a timer interrupt immediately after restore will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already be handled when the hardware timer state is saved.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
 * stopped).
 *
 * Returns:	Amount of correction to count_bias due to drift.
 */
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift)
{
	ktime_t now, count_time;
	u32 now_count, before_count;
	u64 delta;
	int drift, ret = 0;

	/* Calculate expected count at before */
	before_count = vcpu->arch.count_bias +
			kvm_mips_ktime_to_count(vcpu, before);

	/*
	 * Detect significantly negative drift, where count is lower than
	 * expected. Some negative drift is expected when hardware counter is
	 * set after kvm_mips_freeze_hrtimer(), and it is harmless to allow the
	 * time to jump forwards a little, within reason. If the drift is too
	 * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
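	 * @min_drift (which must be <= 0) bounds how far backwards the count
	 * may appear to move before the bias is corrected.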
560 */ 561 drift = count - before_count; 562 if (drift < min_drift) { 563 count_time = before; 564 vcpu->arch.count_bias += drift; 565 ret = drift; 566 goto resume; 567 } 568 569 /* Calculate expected count right now */ 570 now = ktime_get(); 571 now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); 572 573 /* 574 * Detect positive drift, where count is higher than expected, and 575 * adjust the bias to avoid guest time going backwards. 576 */ 577 drift = count - now_count; 578 if (drift > 0) { 579 count_time = now; 580 vcpu->arch.count_bias += drift; 581 ret = drift; 582 goto resume; 583 } 584 585 /* Subtract nanosecond delta to find ktime when count was read */ 586 delta = (u64)(u32)(now_count - count); 587 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); 588 count_time = ktime_sub_ns(now, delta); 589 590 resume: 591 /* Resume using the calculated ktime */ 592 kvm_mips_resume_hrtimer(vcpu, count_time, count); 593 return ret; 594 } 595 596 /** 597 * kvm_mips_write_count() - Modify the count and update timer. 598 * @vcpu: Virtual CPU. 599 * @count: Guest CP0_Count value to set. 600 * 601 * Sets the CP0_Count value and updates the timer accordingly. 602 */ 603 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count) 604 { 605 struct mips_coproc *cop0 = vcpu->arch.cop0; 606 ktime_t now; 607 608 /* Calculate bias */ 609 now = kvm_mips_count_time(vcpu); 610 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); 611 612 if (kvm_mips_count_disabled(vcpu)) 613 /* The timer's disabled, adjust the static count */ 614 kvm_write_c0_guest_count(cop0, count); 615 else 616 /* Update timeout */ 617 kvm_mips_resume_hrtimer(vcpu, now, count); 618 } 619 620 /** 621 * kvm_mips_init_count() - Initialise timer. 622 * @vcpu: Virtual CPU. 623 * @count_hz: Frequency of timer. 624 * 625 * Initialise the timer to the specified frequency, zero it, and set it going if 626 * it's enabled. 627 */ 628 void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz) 629 { 630 vcpu->arch.count_hz = count_hz; 631 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); 632 vcpu->arch.count_dyn_bias = 0; 633 634 /* Starting at 0 */ 635 kvm_mips_write_count(vcpu, 0); 636 } 637 638 /** 639 * kvm_mips_set_count_hz() - Update the frequency of the timer. 640 * @vcpu: Virtual CPU. 641 * @count_hz: Frequency of CP0_Count timer in Hz. 642 * 643 * Change the frequency of the CP0_Count timer. This is done atomically so that 644 * CP0_Count is continuous and no timer interrupt is lost. 645 * 646 * Returns: -EINVAL if @count_hz is out of range. 647 * 0 on success. 648 */ 649 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) 650 { 651 struct mips_coproc *cop0 = vcpu->arch.cop0; 652 int dc; 653 ktime_t now; 654 u32 count; 655 656 /* ensure the frequency is in a sensible range... */ 657 if (count_hz <= 0 || count_hz > NSEC_PER_SEC) 658 return -EINVAL; 659 /* ... 
and has actually changed */ 660 if (vcpu->arch.count_hz == count_hz) 661 return 0; 662 663 /* Safely freeze timer so we can keep it continuous */ 664 dc = kvm_mips_count_disabled(vcpu); 665 if (dc) { 666 now = kvm_mips_count_time(vcpu); 667 count = kvm_read_c0_guest_count(cop0); 668 } else { 669 now = kvm_mips_freeze_hrtimer(vcpu, &count); 670 } 671 672 /* Update the frequency */ 673 vcpu->arch.count_hz = count_hz; 674 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); 675 vcpu->arch.count_dyn_bias = 0; 676 677 /* Calculate adjusted bias so dynamic count is unchanged */ 678 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); 679 680 /* Update and resume hrtimer */ 681 if (!dc) 682 kvm_mips_resume_hrtimer(vcpu, now, count); 683 return 0; 684 } 685 686 /** 687 * kvm_mips_write_compare() - Modify compare and update timer. 688 * @vcpu: Virtual CPU. 689 * @compare: New CP0_Compare value. 690 * @ack: Whether to acknowledge timer interrupt. 691 * 692 * Update CP0_Compare to a new value and update the timeout. 693 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure 694 * any pending timer interrupt is preserved. 695 */ 696 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) 697 { 698 struct mips_coproc *cop0 = vcpu->arch.cop0; 699 int dc; 700 u32 old_compare = kvm_read_c0_guest_compare(cop0); 701 s32 delta = compare - old_compare; 702 u32 cause; 703 ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */ 704 u32 count; 705 706 /* if unchanged, must just be an ack */ 707 if (old_compare == compare) { 708 if (!ack) 709 return; 710 kvm_mips_callbacks->dequeue_timer_int(vcpu); 711 kvm_write_c0_guest_compare(cop0, compare); 712 return; 713 } 714 715 /* 716 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted 717 * too to prevent guest CP0_Count hitting guest CP0_Compare. 718 * 719 * The new GTOffset corresponds to the new value of CP0_Compare, and is 720 * set prior to it being written into the guest context. We disable 721 * preemption until the new value is written to prevent restore of a 722 * GTOffset corresponding to the old CP0_Compare value. 723 */ 724 if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) { 725 preempt_disable(); 726 write_c0_gtoffset(compare - read_c0_count()); 727 back_to_back_c0_hazard(); 728 } 729 730 /* freeze_hrtimer() takes care of timer interrupts <= count */ 731 dc = kvm_mips_count_disabled(vcpu); 732 if (!dc) 733 now = kvm_mips_freeze_hrtimer(vcpu, &count); 734 735 if (ack) 736 kvm_mips_callbacks->dequeue_timer_int(vcpu); 737 else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) 738 /* 739 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so 740 * preserve guest CP0_Cause.TI if we don't want to ack it. 741 */ 742 cause = kvm_read_c0_guest_cause(cop0); 743 744 kvm_write_c0_guest_compare(cop0, compare); 745 746 if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { 747 if (delta > 0) 748 preempt_enable(); 749 750 back_to_back_c0_hazard(); 751 752 if (!ack && cause & CAUSEF_TI) 753 kvm_write_c0_guest_cause(cop0, cause); 754 } 755 756 /* resume_hrtimer() takes care of timer interrupts > count */ 757 if (!dc) 758 kvm_mips_resume_hrtimer(vcpu, now, count); 759 760 /* 761 * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change 762 * until after the new CP0_Compare is written, otherwise new guest 763 * CP0_Count could hit new guest CP0_Compare. 
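	 * (This mirrors the delta > 0 case above, where CP0_GTOffset is
	 * updated before the new CP0_Compare is written.)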
764 */ 765 if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0) 766 write_c0_gtoffset(compare - read_c0_count()); 767 } 768 769 /** 770 * kvm_mips_count_disable() - Disable count. 771 * @vcpu: Virtual CPU. 772 * 773 * Disable the CP0_Count timer. A timer interrupt on or before the final stop 774 * time will be handled but not after. 775 * 776 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or 777 * count_ctl.DC has been set (count disabled). 778 * 779 * Returns: The time that the timer was stopped. 780 */ 781 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) 782 { 783 struct mips_coproc *cop0 = vcpu->arch.cop0; 784 u32 count; 785 ktime_t now; 786 787 /* Stop hrtimer */ 788 hrtimer_cancel(&vcpu->arch.comparecount_timer); 789 790 /* Set the static count from the dynamic count, handling pending TI */ 791 now = ktime_get(); 792 count = kvm_mips_read_count_running(vcpu, now); 793 kvm_write_c0_guest_count(cop0, count); 794 795 return now; 796 } 797 798 /** 799 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC. 800 * @vcpu: Virtual CPU. 801 * 802 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or 803 * before the final stop time will be handled if the timer isn't disabled by 804 * count_ctl.DC, but not after. 805 * 806 * Assumes CP0_Cause.DC is clear (count enabled). 807 */ 808 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) 809 { 810 struct mips_coproc *cop0 = vcpu->arch.cop0; 811 812 kvm_set_c0_guest_cause(cop0, CAUSEF_DC); 813 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) 814 kvm_mips_count_disable(vcpu); 815 } 816 817 /** 818 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC. 819 * @vcpu: Virtual CPU. 820 * 821 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after 822 * the start time will be handled if the timer isn't disabled by count_ctl.DC, 823 * potentially before even returning, so the caller should be careful with 824 * ordering of CP0_Cause modifications so as not to lose it. 825 * 826 * Assumes CP0_Cause.DC is set (count disabled). 827 */ 828 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) 829 { 830 struct mips_coproc *cop0 = vcpu->arch.cop0; 831 u32 count; 832 833 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); 834 835 /* 836 * Set the dynamic count to match the static count. 837 * This starts the hrtimer if count_ctl.DC allows it. 838 * Otherwise it conveniently updates the biases. 839 */ 840 count = kvm_read_c0_guest_count(cop0); 841 kvm_mips_write_count(vcpu, count); 842 } 843 844 /** 845 * kvm_mips_set_count_ctl() - Update the count control KVM register. 846 * @vcpu: Virtual CPU. 847 * @count_ctl: Count control register new value. 848 * 849 * Set the count control KVM register. The timer is updated accordingly. 850 * 851 * Returns: -EINVAL if reserved bits are set. 852 * 0 on success. 853 */ 854 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) 855 { 856 struct mips_coproc *cop0 = vcpu->arch.cop0; 857 s64 changed = count_ctl ^ vcpu->arch.count_ctl; 858 s64 delta; 859 ktime_t expire, now; 860 u32 count, compare; 861 862 /* Only allow defined bits to be changed */ 863 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC)) 864 return -EINVAL; 865 866 /* Apply new value */ 867 vcpu->arch.count_ctl = count_ctl; 868 869 /* Master CP0_Count disable */ 870 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) { 871 /* Is CP0_Cause.DC already disabling CP0_Count? 
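		 * If so, the hrtimer is already stopped, so only the resume
		 * time bookkeeping below needs updating.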
		 */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		kvm_vz_lose_htimer(vcpu);
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
				    unsigned long entryhi)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu, i;
	u32 nasid = entryhi & KVM_ENTRYHI_ASID;

	if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
		trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
					KVM_ENTRYHI_ASID, nasid);

		/*
		 * Flush entries from the GVA page tables.
		 * Guest user page table will get flushed lazily on re-entry to
		 * guest user if the guest ASID actually changes.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);

		/*
		 * Regenerate/invalidate kernel MMU context.
		 * The user MMU context will be regenerated lazily on re-entry
		 * to guest user if the guest ASID actually changes.
		 */
		preempt_disable();
		cpu = smp_processor_id();
		get_new_mmu_context(kern_mm);
		for_each_possible_cpu(i)
			if (i != cpu)
				set_cpu_context(i, kern_mm, 0);
		preempt_enable();
	}
	kvm_write_c0_guest_entryhi(cop0, entryhi);
}

enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb;
	unsigned long pc = vcpu->arch.pc;
	int index;

	index = kvm_read_c0_guest_index(cop0);
	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		/* UNDEFINED */
		kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
		index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
	}

	tlb = &vcpu->arch.guest_tlb[index];
	kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
	kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
	kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
	kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);

	return EMULATE_DONE;
}

/**
 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
 * @vcpu:	VCPU with changed mappings.
 * @tlb:	TLB entry being removed.
 *
 * This is called to indicate a single change in guest MMU mappings, so that we
 * can arrange TLB flushes on this and other CPUs.
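 *
 * Entries which are already invalid, and entries whose virtual address lies
 * in the host kernel's address range, are skipped.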
1059 */ 1060 static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, 1061 struct kvm_mips_tlb *tlb) 1062 { 1063 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 1064 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; 1065 int cpu, i; 1066 bool user; 1067 1068 /* No need to flush for entries which are already invalid */ 1069 if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V)) 1070 return; 1071 /* Don't touch host kernel page tables or TLB mappings */ 1072 if ((unsigned long)tlb->tlb_hi > 0x7fffffff) 1073 return; 1074 /* User address space doesn't need flushing for KSeg2/3 changes */ 1075 user = tlb->tlb_hi < KVM_GUEST_KSEG0; 1076 1077 preempt_disable(); 1078 1079 /* Invalidate page table entries */ 1080 kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user); 1081 1082 /* 1083 * Probe the shadow host TLB for the entry being overwritten, if one 1084 * matches, invalidate it 1085 */ 1086 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true); 1087 1088 /* Invalidate the whole ASID on other CPUs */ 1089 cpu = smp_processor_id(); 1090 for_each_possible_cpu(i) { 1091 if (i == cpu) 1092 continue; 1093 if (user) 1094 set_cpu_context(i, user_mm, 0); 1095 set_cpu_context(i, kern_mm, 0); 1096 } 1097 1098 preempt_enable(); 1099 } 1100 1101 /* Write Guest TLB Entry @ Index */ 1102 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) 1103 { 1104 struct mips_coproc *cop0 = vcpu->arch.cop0; 1105 int index = kvm_read_c0_guest_index(cop0); 1106 struct kvm_mips_tlb *tlb = NULL; 1107 unsigned long pc = vcpu->arch.pc; 1108 1109 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { 1110 kvm_debug("%s: illegal index: %d\n", __func__, index); 1111 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", 1112 pc, index, kvm_read_c0_guest_entryhi(cop0), 1113 kvm_read_c0_guest_entrylo0(cop0), 1114 kvm_read_c0_guest_entrylo1(cop0), 1115 kvm_read_c0_guest_pagemask(cop0)); 1116 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; 1117 } 1118 1119 tlb = &vcpu->arch.guest_tlb[index]; 1120 1121 kvm_mips_invalidate_guest_tlb(vcpu, tlb); 1122 1123 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); 1124 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); 1125 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); 1126 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); 1127 1128 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", 1129 pc, index, kvm_read_c0_guest_entryhi(cop0), 1130 kvm_read_c0_guest_entrylo0(cop0), 1131 kvm_read_c0_guest_entrylo1(cop0), 1132 kvm_read_c0_guest_pagemask(cop0)); 1133 1134 return EMULATE_DONE; 1135 } 1136 1137 /* Write Guest TLB Entry @ Random Index */ 1138 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) 1139 { 1140 struct mips_coproc *cop0 = vcpu->arch.cop0; 1141 struct kvm_mips_tlb *tlb = NULL; 1142 unsigned long pc = vcpu->arch.pc; 1143 int index; 1144 1145 index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE); 1146 tlb = &vcpu->arch.guest_tlb[index]; 1147 1148 kvm_mips_invalidate_guest_tlb(vcpu, tlb); 1149 1150 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); 1151 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); 1152 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); 1153 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); 1154 1155 kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", 1156 pc, index, kvm_read_c0_guest_entryhi(cop0), 1157 kvm_read_c0_guest_entrylo0(cop0), 1158 kvm_read_c0_guest_entrylo1(cop0)); 1159 1160 return 
EMULATE_DONE; 1161 } 1162 1163 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) 1164 { 1165 struct mips_coproc *cop0 = vcpu->arch.cop0; 1166 long entryhi = kvm_read_c0_guest_entryhi(cop0); 1167 unsigned long pc = vcpu->arch.pc; 1168 int index = -1; 1169 1170 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); 1171 1172 kvm_write_c0_guest_index(cop0, index); 1173 1174 kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, 1175 index); 1176 1177 return EMULATE_DONE; 1178 } 1179 1180 /** 1181 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 1182 * @vcpu: Virtual CPU. 1183 * 1184 * Finds the mask of bits which are writable in the guest's Config1 CP0 1185 * register, by userland (currently read-only to the guest). 1186 */ 1187 unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) 1188 { 1189 unsigned int mask = 0; 1190 1191 /* Permit FPU to be present if FPU is supported */ 1192 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) 1193 mask |= MIPS_CONF1_FP; 1194 1195 return mask; 1196 } 1197 1198 /** 1199 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 1200 * @vcpu: Virtual CPU. 1201 * 1202 * Finds the mask of bits which are writable in the guest's Config3 CP0 1203 * register, by userland (currently read-only to the guest). 1204 */ 1205 unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) 1206 { 1207 /* Config4 and ULRI are optional */ 1208 unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI; 1209 1210 /* Permit MSA to be present if MSA is supported */ 1211 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) 1212 mask |= MIPS_CONF3_MSA; 1213 1214 return mask; 1215 } 1216 1217 /** 1218 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 1219 * @vcpu: Virtual CPU. 1220 * 1221 * Finds the mask of bits which are writable in the guest's Config4 CP0 1222 * register, by userland (currently read-only to the guest). 1223 */ 1224 unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) 1225 { 1226 /* Config5 is optional */ 1227 unsigned int mask = MIPS_CONF_M; 1228 1229 /* KScrExist */ 1230 mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT; 1231 1232 return mask; 1233 } 1234 1235 /** 1236 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 1237 * @vcpu: Virtual CPU. 1238 * 1239 * Finds the mask of bits which are writable in the guest's Config5 CP0 1240 * register, by the guest itself. 1241 */ 1242 unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) 1243 { 1244 unsigned int mask = 0; 1245 1246 /* Permit MSAEn changes if MSA supported and enabled */ 1247 if (kvm_mips_guest_has_msa(&vcpu->arch)) 1248 mask |= MIPS_CONF5_MSAEN; 1249 1250 /* 1251 * Permit guest FPU mode changes if FPU is enabled and the relevant 1252 * feature exists according to FIR register. 
1253 */ 1254 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { 1255 if (cpu_has_fre) 1256 mask |= MIPS_CONF5_FRE; 1257 /* We don't support UFR or UFE */ 1258 } 1259 1260 return mask; 1261 } 1262 1263 enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, 1264 u32 *opc, u32 cause, 1265 struct kvm_run *run, 1266 struct kvm_vcpu *vcpu) 1267 { 1268 struct mips_coproc *cop0 = vcpu->arch.cop0; 1269 enum emulation_result er = EMULATE_DONE; 1270 u32 rt, rd, sel; 1271 unsigned long curr_pc; 1272 1273 /* 1274 * Update PC and hold onto current PC in case there is 1275 * an error and we want to rollback the PC 1276 */ 1277 curr_pc = vcpu->arch.pc; 1278 er = update_pc(vcpu, cause); 1279 if (er == EMULATE_FAIL) 1280 return er; 1281 1282 if (inst.co_format.co) { 1283 switch (inst.co_format.func) { 1284 case tlbr_op: /* Read indexed TLB entry */ 1285 er = kvm_mips_emul_tlbr(vcpu); 1286 break; 1287 case tlbwi_op: /* Write indexed */ 1288 er = kvm_mips_emul_tlbwi(vcpu); 1289 break; 1290 case tlbwr_op: /* Write random */ 1291 er = kvm_mips_emul_tlbwr(vcpu); 1292 break; 1293 case tlbp_op: /* TLB Probe */ 1294 er = kvm_mips_emul_tlbp(vcpu); 1295 break; 1296 case rfe_op: 1297 kvm_err("!!!COP0_RFE!!!\n"); 1298 break; 1299 case eret_op: 1300 er = kvm_mips_emul_eret(vcpu); 1301 goto dont_update_pc; 1302 case wait_op: 1303 er = kvm_mips_emul_wait(vcpu); 1304 break; 1305 case hypcall_op: 1306 er = kvm_mips_emul_hypcall(vcpu, inst); 1307 break; 1308 } 1309 } else { 1310 rt = inst.c0r_format.rt; 1311 rd = inst.c0r_format.rd; 1312 sel = inst.c0r_format.sel; 1313 1314 switch (inst.c0r_format.rs) { 1315 case mfc_op: 1316 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 1317 cop0->stat[rd][sel]++; 1318 #endif 1319 /* Get reg */ 1320 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { 1321 vcpu->arch.gprs[rt] = 1322 (s32)kvm_mips_read_count(vcpu); 1323 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { 1324 vcpu->arch.gprs[rt] = 0x0; 1325 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1326 kvm_mips_trans_mfc0(inst, opc, vcpu); 1327 #endif 1328 } else { 1329 vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel]; 1330 1331 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1332 kvm_mips_trans_mfc0(inst, opc, vcpu); 1333 #endif 1334 } 1335 1336 trace_kvm_hwr(vcpu, KVM_TRACE_MFC0, 1337 KVM_TRACE_COP0(rd, sel), 1338 vcpu->arch.gprs[rt]); 1339 break; 1340 1341 case dmfc_op: 1342 vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; 1343 1344 trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0, 1345 KVM_TRACE_COP0(rd, sel), 1346 vcpu->arch.gprs[rt]); 1347 break; 1348 1349 case mtc_op: 1350 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 1351 cop0->stat[rd][sel]++; 1352 #endif 1353 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, 1354 KVM_TRACE_COP0(rd, sel), 1355 vcpu->arch.gprs[rt]); 1356 1357 if ((rd == MIPS_CP0_TLB_INDEX) 1358 && (vcpu->arch.gprs[rt] >= 1359 KVM_MIPS_GUEST_TLB_SIZE)) { 1360 kvm_err("Invalid TLB Index: %ld", 1361 vcpu->arch.gprs[rt]); 1362 er = EMULATE_FAIL; 1363 break; 1364 } 1365 if ((rd == MIPS_CP0_PRID) && (sel == 1)) { 1366 /* 1367 * Preserve core number, and keep the exception 1368 * base in guest KSeg0. 
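				 * Only the 0x1ffff000 exception-base bits are
				 * writable here; the CPUNum field is
				 * preserved.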
1369 */ 1370 kvm_change_c0_guest_ebase(cop0, 0x1ffff000, 1371 vcpu->arch.gprs[rt]); 1372 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { 1373 kvm_mips_change_entryhi(vcpu, 1374 vcpu->arch.gprs[rt]); 1375 } 1376 /* Are we writing to COUNT */ 1377 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { 1378 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); 1379 goto done; 1380 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { 1381 /* If we are writing to COMPARE */ 1382 /* Clear pending timer interrupt, if any */ 1383 kvm_mips_write_compare(vcpu, 1384 vcpu->arch.gprs[rt], 1385 true); 1386 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { 1387 unsigned int old_val, val, change; 1388 1389 old_val = kvm_read_c0_guest_status(cop0); 1390 val = vcpu->arch.gprs[rt]; 1391 change = val ^ old_val; 1392 1393 /* Make sure that the NMI bit is never set */ 1394 val &= ~ST0_NMI; 1395 1396 /* 1397 * Don't allow CU1 or FR to be set unless FPU 1398 * capability enabled and exists in guest 1399 * configuration. 1400 */ 1401 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) 1402 val &= ~(ST0_CU1 | ST0_FR); 1403 1404 /* 1405 * Also don't allow FR to be set if host doesn't 1406 * support it. 1407 */ 1408 if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) 1409 val &= ~ST0_FR; 1410 1411 1412 /* Handle changes in FPU mode */ 1413 preempt_disable(); 1414 1415 /* 1416 * FPU and Vector register state is made 1417 * UNPREDICTABLE by a change of FR, so don't 1418 * even bother saving it. 1419 */ 1420 if (change & ST0_FR) 1421 kvm_drop_fpu(vcpu); 1422 1423 /* 1424 * If MSA state is already live, it is undefined 1425 * how it interacts with FR=0 FPU state, and we 1426 * don't want to hit reserved instruction 1427 * exceptions trying to save the MSA state later 1428 * when CU=1 && FR=1, so play it safe and save 1429 * it first. 1430 */ 1431 if (change & ST0_CU1 && !(val & ST0_FR) && 1432 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) 1433 kvm_lose_fpu(vcpu); 1434 1435 /* 1436 * Propagate CU1 (FPU enable) changes 1437 * immediately if the FPU context is already 1438 * loaded. When disabling we leave the context 1439 * loaded so it can be quickly enabled again in 1440 * the near future. 1441 */ 1442 if (change & ST0_CU1 && 1443 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) 1444 change_c0_status(ST0_CU1, val); 1445 1446 preempt_enable(); 1447 1448 kvm_write_c0_guest_status(cop0, val); 1449 1450 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1451 /* 1452 * If FPU present, we need CU1/FR bits to take 1453 * effect fairly soon. 1454 */ 1455 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) 1456 kvm_mips_trans_mtc0(inst, opc, vcpu); 1457 #endif 1458 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { 1459 unsigned int old_val, val, change, wrmask; 1460 1461 old_val = kvm_read_c0_guest_config5(cop0); 1462 val = vcpu->arch.gprs[rt]; 1463 1464 /* Only a few bits are writable in Config5 */ 1465 wrmask = kvm_mips_config5_wrmask(vcpu); 1466 change = (val ^ old_val) & wrmask; 1467 val = old_val ^ change; 1468 1469 1470 /* Handle changes in FPU/MSA modes */ 1471 preempt_disable(); 1472 1473 /* 1474 * Propagate FRE changes immediately if the FPU 1475 * context is already loaded. 1476 */ 1477 if (change & MIPS_CONF5_FRE && 1478 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) 1479 change_c0_config5(MIPS_CONF5_FRE, val); 1480 1481 /* 1482 * Propagate MSAEn changes immediately if the 1483 * MSA context is already loaded. When disabling 1484 * we leave the context loaded so it can be 1485 * quickly enabled again in the near future. 
1486 */ 1487 if (change & MIPS_CONF5_MSAEN && 1488 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) 1489 change_c0_config5(MIPS_CONF5_MSAEN, 1490 val); 1491 1492 preempt_enable(); 1493 1494 kvm_write_c0_guest_config5(cop0, val); 1495 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { 1496 u32 old_cause, new_cause; 1497 1498 old_cause = kvm_read_c0_guest_cause(cop0); 1499 new_cause = vcpu->arch.gprs[rt]; 1500 /* Update R/W bits */ 1501 kvm_change_c0_guest_cause(cop0, 0x08800300, 1502 new_cause); 1503 /* DC bit enabling/disabling timer? */ 1504 if ((old_cause ^ new_cause) & CAUSEF_DC) { 1505 if (new_cause & CAUSEF_DC) 1506 kvm_mips_count_disable_cause(vcpu); 1507 else 1508 kvm_mips_count_enable_cause(vcpu); 1509 } 1510 } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) { 1511 u32 mask = MIPS_HWRENA_CPUNUM | 1512 MIPS_HWRENA_SYNCISTEP | 1513 MIPS_HWRENA_CC | 1514 MIPS_HWRENA_CCRES; 1515 1516 if (kvm_read_c0_guest_config3(cop0) & 1517 MIPS_CONF3_ULRI) 1518 mask |= MIPS_HWRENA_ULR; 1519 cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask; 1520 } else { 1521 cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; 1522 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1523 kvm_mips_trans_mtc0(inst, opc, vcpu); 1524 #endif 1525 } 1526 break; 1527 1528 case dmtc_op: 1529 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", 1530 vcpu->arch.pc, rt, rd, sel); 1531 trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0, 1532 KVM_TRACE_COP0(rd, sel), 1533 vcpu->arch.gprs[rt]); 1534 er = EMULATE_FAIL; 1535 break; 1536 1537 case mfmc0_op: 1538 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS 1539 cop0->stat[MIPS_CP0_STATUS][0]++; 1540 #endif 1541 if (rt != 0) 1542 vcpu->arch.gprs[rt] = 1543 kvm_read_c0_guest_status(cop0); 1544 /* EI */ 1545 if (inst.mfmc0_format.sc) { 1546 kvm_debug("[%#lx] mfmc0_op: EI\n", 1547 vcpu->arch.pc); 1548 kvm_set_c0_guest_status(cop0, ST0_IE); 1549 } else { 1550 kvm_debug("[%#lx] mfmc0_op: DI\n", 1551 vcpu->arch.pc); 1552 kvm_clear_c0_guest_status(cop0, ST0_IE); 1553 } 1554 1555 break; 1556 1557 case wrpgpr_op: 1558 { 1559 u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; 1560 u32 pss = 1561 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; 1562 /* 1563 * We don't support any shadow register sets, so 1564 * SRSCtl[PSS] == SRSCtl[CSS] = 0 1565 */ 1566 if (css || pss) { 1567 er = EMULATE_FAIL; 1568 break; 1569 } 1570 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, 1571 vcpu->arch.gprs[rt]); 1572 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; 1573 } 1574 break; 1575 default: 1576 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", 1577 vcpu->arch.pc, inst.c0r_format.rs); 1578 er = EMULATE_FAIL; 1579 break; 1580 } 1581 } 1582 1583 done: 1584 /* Rollback PC only if emulation was unsuccessful */ 1585 if (er == EMULATE_FAIL) 1586 vcpu->arch.pc = curr_pc; 1587 1588 dont_update_pc: 1589 /* 1590 * This is for special instructions whose emulation 1591 * updates the PC, so do not overwrite the PC under 1592 * any circumstances 1593 */ 1594 1595 return er; 1596 } 1597 1598 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, 1599 u32 cause, 1600 struct kvm_run *run, 1601 struct kvm_vcpu *vcpu) 1602 { 1603 int r; 1604 enum emulation_result er; 1605 u32 rt; 1606 void *data = run->mmio.data; 1607 unsigned int imme; 1608 unsigned long curr_pc; 1609 1610 /* 1611 * Update PC and hold onto current PC in case there is 1612 * an error and we want to rollback the PC 1613 */ 1614 curr_pc = vcpu->arch.pc; 1615 er = update_pc(vcpu, cause); 1616 if (er == EMULATE_FAIL) 1617 return er; 1618 1619 rt = inst.i_format.rt; 1620 1621 run->mmio.phys_addr = 
kvm_mips_callbacks->gva_to_gpa( 1622 vcpu->arch.host_cp0_badvaddr); 1623 if (run->mmio.phys_addr == KVM_INVALID_ADDR) 1624 goto out_fail; 1625 1626 switch (inst.i_format.opcode) { 1627 #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 1628 case sd_op: 1629 run->mmio.len = 8; 1630 *(u64 *)data = vcpu->arch.gprs[rt]; 1631 1632 kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n", 1633 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1634 vcpu->arch.gprs[rt], *(u64 *)data); 1635 break; 1636 #endif 1637 1638 case sw_op: 1639 run->mmio.len = 4; 1640 *(u32 *)data = vcpu->arch.gprs[rt]; 1641 1642 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", 1643 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1644 vcpu->arch.gprs[rt], *(u32 *)data); 1645 break; 1646 1647 case sh_op: 1648 run->mmio.len = 2; 1649 *(u16 *)data = vcpu->arch.gprs[rt]; 1650 1651 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", 1652 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1653 vcpu->arch.gprs[rt], *(u16 *)data); 1654 break; 1655 1656 case sb_op: 1657 run->mmio.len = 1; 1658 *(u8 *)data = vcpu->arch.gprs[rt]; 1659 1660 kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", 1661 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1662 vcpu->arch.gprs[rt], *(u8 *)data); 1663 break; 1664 1665 case swl_op: 1666 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1667 vcpu->arch.host_cp0_badvaddr) & (~0x3); 1668 run->mmio.len = 4; 1669 imme = vcpu->arch.host_cp0_badvaddr & 0x3; 1670 switch (imme) { 1671 case 0: 1672 *(u32 *)data = ((*(u32 *)data) & 0xffffff00) | 1673 (vcpu->arch.gprs[rt] >> 24); 1674 break; 1675 case 1: 1676 *(u32 *)data = ((*(u32 *)data) & 0xffff0000) | 1677 (vcpu->arch.gprs[rt] >> 16); 1678 break; 1679 case 2: 1680 *(u32 *)data = ((*(u32 *)data) & 0xff000000) | 1681 (vcpu->arch.gprs[rt] >> 8); 1682 break; 1683 case 3: 1684 *(u32 *)data = vcpu->arch.gprs[rt]; 1685 break; 1686 default: 1687 break; 1688 } 1689 1690 kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n", 1691 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1692 vcpu->arch.gprs[rt], *(u32 *)data); 1693 break; 1694 1695 case swr_op: 1696 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1697 vcpu->arch.host_cp0_badvaddr) & (~0x3); 1698 run->mmio.len = 4; 1699 imme = vcpu->arch.host_cp0_badvaddr & 0x3; 1700 switch (imme) { 1701 case 0: 1702 *(u32 *)data = vcpu->arch.gprs[rt]; 1703 break; 1704 case 1: 1705 *(u32 *)data = ((*(u32 *)data) & 0xff) | 1706 (vcpu->arch.gprs[rt] << 8); 1707 break; 1708 case 2: 1709 *(u32 *)data = ((*(u32 *)data) & 0xffff) | 1710 (vcpu->arch.gprs[rt] << 16); 1711 break; 1712 case 3: 1713 *(u32 *)data = ((*(u32 *)data) & 0xffffff) | 1714 (vcpu->arch.gprs[rt] << 24); 1715 break; 1716 default: 1717 break; 1718 } 1719 1720 kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n", 1721 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1722 vcpu->arch.gprs[rt], *(u32 *)data); 1723 break; 1724 1725 #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 1726 case sdl_op: 1727 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1728 vcpu->arch.host_cp0_badvaddr) & (~0x7); 1729 1730 run->mmio.len = 8; 1731 imme = vcpu->arch.host_cp0_badvaddr & 0x7; 1732 switch (imme) { 1733 case 0: 1734 *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) | 1735 ((vcpu->arch.gprs[rt] >> 56) & 0xff); 1736 break; 1737 case 1: 1738 *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) | 1739 ((vcpu->arch.gprs[rt] >> 48) & 0xffff); 1740 break; 1741 case 2: 1742 *(u64 *)data = ((*(u64 *)data) & 
0xffffffffff000000) | 1743 ((vcpu->arch.gprs[rt] >> 40) & 0xffffff); 1744 break; 1745 case 3: 1746 *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) | 1747 ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff); 1748 break; 1749 case 4: 1750 *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) | 1751 ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff); 1752 break; 1753 case 5: 1754 *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) | 1755 ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff); 1756 break; 1757 case 6: 1758 *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) | 1759 ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff); 1760 break; 1761 case 7: 1762 *(u64 *)data = vcpu->arch.gprs[rt]; 1763 break; 1764 default: 1765 break; 1766 } 1767 1768 kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n", 1769 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1770 vcpu->arch.gprs[rt], *(u64 *)data); 1771 break; 1772 1773 case sdr_op: 1774 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1775 vcpu->arch.host_cp0_badvaddr) & (~0x7); 1776 1777 run->mmio.len = 8; 1778 imme = vcpu->arch.host_cp0_badvaddr & 0x7; 1779 switch (imme) { 1780 case 0: 1781 *(u64 *)data = vcpu->arch.gprs[rt]; 1782 break; 1783 case 1: 1784 *(u64 *)data = ((*(u64 *)data) & 0xff) | 1785 (vcpu->arch.gprs[rt] << 8); 1786 break; 1787 case 2: 1788 *(u64 *)data = ((*(u64 *)data) & 0xffff) | 1789 (vcpu->arch.gprs[rt] << 16); 1790 break; 1791 case 3: 1792 *(u64 *)data = ((*(u64 *)data) & 0xffffff) | 1793 (vcpu->arch.gprs[rt] << 24); 1794 break; 1795 case 4: 1796 *(u64 *)data = ((*(u64 *)data) & 0xffffffff) | 1797 (vcpu->arch.gprs[rt] << 32); 1798 break; 1799 case 5: 1800 *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) | 1801 (vcpu->arch.gprs[rt] << 40); 1802 break; 1803 case 6: 1804 *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) | 1805 (vcpu->arch.gprs[rt] << 48); 1806 break; 1807 case 7: 1808 *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) | 1809 (vcpu->arch.gprs[rt] << 56); 1810 break; 1811 default: 1812 break; 1813 } 1814 1815 kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n", 1816 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1817 vcpu->arch.gprs[rt], *(u64 *)data); 1818 break; 1819 #endif 1820 1821 #ifdef CONFIG_CPU_LOONGSON64 1822 case sdc2_op: 1823 rt = inst.loongson3_lsdc2_format.rt; 1824 switch (inst.loongson3_lsdc2_format.opcode1) { 1825 /* 1826 * Loongson-3 overridden sdc2 instructions. 
		 * opcode1              instruction
		 *   0x0          gssbx: store 1 byte from GPR
		 *   0x1          gsshx: store 2 bytes from GPR
		 *   0x2          gsswx: store 4 bytes from GPR
		 *   0x3          gssdx: store 8 bytes from GPR
		 */
		case 0x0:
			run->mmio.len = 1;
			*(u8 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u8 *)data);
			break;
		case 0x1:
			run->mmio.len = 2;
			*(u16 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u16 *)data);
			break;
		case 0x2:
			run->mmio.len = 4;
			*(u32 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u32 *)data);
			break;
		case 0x3:
			run->mmio.len = 8;
			*(u64 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u64 *)data);
			break;
		default:
			kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
				inst.word);
			break;
		}
		break;
#endif
	default:
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		goto out_fail;
	}

	vcpu->mmio_needed = 1;
	run->mmio.is_write = 1;
	vcpu->mmio_is_write = 1;

	r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			run->mmio.phys_addr, run->mmio.len, data);

	if (!r) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;

out_fail:
	/* Rollback PC if emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	return EMULATE_FAIL;
}

enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	int r;
	enum emulation_result er;
	unsigned long curr_pc;
	u32 op, rt;
	unsigned int imme;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	/*
	 * Find the resume PC now while we have safe and easy access to the
	 * prior branch instruction, and save it for
	 * kvm_mips_complete_mmio_load() to restore later.
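	 * The PC itself is wound back to the faulting instruction below, so
	 * kvm_mips_complete_mmio_load() can advance it to the saved io_pc
	 * once the MMIO data has been supplied.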
1915 */ 1916 curr_pc = vcpu->arch.pc; 1917 er = update_pc(vcpu, cause); 1918 if (er == EMULATE_FAIL) 1919 return er; 1920 vcpu->arch.io_pc = vcpu->arch.pc; 1921 vcpu->arch.pc = curr_pc; 1922 1923 vcpu->arch.io_gpr = rt; 1924 1925 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1926 vcpu->arch.host_cp0_badvaddr); 1927 if (run->mmio.phys_addr == KVM_INVALID_ADDR) 1928 return EMULATE_FAIL; 1929 1930 vcpu->mmio_needed = 2; /* signed */ 1931 switch (op) { 1932 #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 1933 case ld_op: 1934 run->mmio.len = 8; 1935 break; 1936 1937 case lwu_op: 1938 vcpu->mmio_needed = 1; /* unsigned */ 1939 /* fall through */ 1940 #endif 1941 case lw_op: 1942 run->mmio.len = 4; 1943 break; 1944 1945 case lhu_op: 1946 vcpu->mmio_needed = 1; /* unsigned */ 1947 fallthrough; 1948 case lh_op: 1949 run->mmio.len = 2; 1950 break; 1951 1952 case lbu_op: 1953 vcpu->mmio_needed = 1; /* unsigned */ 1954 fallthrough; 1955 case lb_op: 1956 run->mmio.len = 1; 1957 break; 1958 1959 case lwl_op: 1960 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1961 vcpu->arch.host_cp0_badvaddr) & (~0x3); 1962 1963 run->mmio.len = 4; 1964 imme = vcpu->arch.host_cp0_badvaddr & 0x3; 1965 switch (imme) { 1966 case 0: 1967 vcpu->mmio_needed = 3; /* 1 byte */ 1968 break; 1969 case 1: 1970 vcpu->mmio_needed = 4; /* 2 bytes */ 1971 break; 1972 case 2: 1973 vcpu->mmio_needed = 5; /* 3 bytes */ 1974 break; 1975 case 3: 1976 vcpu->mmio_needed = 6; /* 4 bytes */ 1977 break; 1978 default: 1979 break; 1980 } 1981 break; 1982 1983 case lwr_op: 1984 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1985 vcpu->arch.host_cp0_badvaddr) & (~0x3); 1986 1987 run->mmio.len = 4; 1988 imme = vcpu->arch.host_cp0_badvaddr & 0x3; 1989 switch (imme) { 1990 case 0: 1991 vcpu->mmio_needed = 7; /* 4 bytes */ 1992 break; 1993 case 1: 1994 vcpu->mmio_needed = 8; /* 3 bytes */ 1995 break; 1996 case 2: 1997 vcpu->mmio_needed = 9; /* 2 bytes */ 1998 break; 1999 case 3: 2000 vcpu->mmio_needed = 10; /* 1 byte */ 2001 break; 2002 default: 2003 break; 2004 } 2005 break; 2006 2007 #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 2008 case ldl_op: 2009 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 2010 vcpu->arch.host_cp0_badvaddr) & (~0x7); 2011 2012 run->mmio.len = 8; 2013 imme = vcpu->arch.host_cp0_badvaddr & 0x7; 2014 switch (imme) { 2015 case 0: 2016 vcpu->mmio_needed = 11; /* 1 byte */ 2017 break; 2018 case 1: 2019 vcpu->mmio_needed = 12; /* 2 bytes */ 2020 break; 2021 case 2: 2022 vcpu->mmio_needed = 13; /* 3 bytes */ 2023 break; 2024 case 3: 2025 vcpu->mmio_needed = 14; /* 4 bytes */ 2026 break; 2027 case 4: 2028 vcpu->mmio_needed = 15; /* 5 bytes */ 2029 break; 2030 case 5: 2031 vcpu->mmio_needed = 16; /* 6 bytes */ 2032 break; 2033 case 6: 2034 vcpu->mmio_needed = 17; /* 7 bytes */ 2035 break; 2036 case 7: 2037 vcpu->mmio_needed = 18; /* 8 bytes */ 2038 break; 2039 default: 2040 break; 2041 } 2042 break; 2043 2044 case ldr_op: 2045 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 2046 vcpu->arch.host_cp0_badvaddr) & (~0x7); 2047 2048 run->mmio.len = 8; 2049 imme = vcpu->arch.host_cp0_badvaddr & 0x7; 2050 switch (imme) { 2051 case 0: 2052 vcpu->mmio_needed = 19; /* 8 bytes */ 2053 break; 2054 case 1: 2055 vcpu->mmio_needed = 20; /* 7 bytes */ 2056 break; 2057 case 2: 2058 vcpu->mmio_needed = 21; /* 6 bytes */ 2059 break; 2060 case 3: 2061 vcpu->mmio_needed = 22; /* 5 bytes */ 2062 break; 2063 case 4: 2064 vcpu->mmio_needed = 23; /* 4 bytes */ 2065 break; 2066 case 5: 2067 
			vcpu->mmio_needed = 24;	/* 3 bytes */
			break;
		case 6:
			vcpu->mmio_needed = 25;	/* 2 bytes */
			break;
		case 7:
			vcpu->mmio_needed = 26;	/* 1 byte */
			break;
		default:
			break;
		}
		break;
#endif

#ifdef CONFIG_CPU_LOONGSON64
	case ldc2_op:
		rt = inst.loongson3_lsdc2_format.rt;
		switch (inst.loongson3_lsdc2_format.opcode1) {
		/*
		 * Loongson-3 overridden ldc2 instructions.
		 *	opcode1		instruction
		 *	0x0		gslbx: load 1 byte to GPR
		 *	0x1		gslhx: load 2 bytes to GPR
		 *	0x2		gslwx: load 4 bytes to GPR
		 *	0x3		gsldx: load 8 bytes to GPR
		 */
		case 0x0:
			run->mmio.len = 1;
			vcpu->mmio_needed = 27;	/* signed */
			break;
		case 0x1:
			run->mmio.len = 2;
			vcpu->mmio_needed = 28;	/* signed */
			break;
		case 0x2:
			run->mmio.len = 4;
			vcpu->mmio_needed = 29;	/* signed */
			break;
		case 0x3:
			run->mmio.len = 8;
			vcpu->mmio_needed = 30;	/* signed */
			break;
		default:
			kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
				inst.word);
			break;
		}
		break;
#endif

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		vcpu->mmio_needed = 0;
		return EMULATE_FAIL;
	}

	run->mmio.is_write = 0;
	vcpu->mmio_is_write = 0;

	r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
			run->mmio.phys_addr, run->mmio.len, run->mmio.data);

	if (!r) {
		kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

#ifndef CONFIG_KVM_MIPS_VZ
static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
						     unsigned long curr_pc,
						     unsigned long addr,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu,
						     u32 cause)
{
	int err;

	for (;;) {
		/* Carefully attempt the cache operation */
		kvm_trap_emul_gva_lockless_begin(vcpu);
		err = fn(addr);
		kvm_trap_emul_gva_lockless_end(vcpu);

		if (likely(!err))
			return EMULATE_DONE;

		/*
		 * Try to handle the fault and retry, maybe we just raced with a
		 * GVA invalidation.
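		 * If kvm_trap_emul_gva_fault() below can repair the mapping we
		 * simply loop and retry the cache op; anything it cannot
		 * repair is turned into a guest-visible TLB exception or a
		 * hard failure, so in practice the loop only spins while the
		 * mapping can actually be re-established.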
2161 */ 2162 switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) { 2163 case KVM_MIPS_GVA: 2164 case KVM_MIPS_GPA: 2165 /* bad virtual or physical address */ 2166 return EMULATE_FAIL; 2167 case KVM_MIPS_TLB: 2168 /* no matching guest TLB */ 2169 vcpu->arch.host_cp0_badvaddr = addr; 2170 vcpu->arch.pc = curr_pc; 2171 kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu); 2172 return EMULATE_EXCEPT; 2173 case KVM_MIPS_TLBINV: 2174 /* invalid matching guest TLB */ 2175 vcpu->arch.host_cp0_badvaddr = addr; 2176 vcpu->arch.pc = curr_pc; 2177 kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu); 2178 return EMULATE_EXCEPT; 2179 default: 2180 break; 2181 } 2182 } 2183 } 2184 2185 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, 2186 u32 *opc, u32 cause, 2187 struct kvm_run *run, 2188 struct kvm_vcpu *vcpu) 2189 { 2190 enum emulation_result er = EMULATE_DONE; 2191 u32 cache, op_inst, op, base; 2192 s16 offset; 2193 struct kvm_vcpu_arch *arch = &vcpu->arch; 2194 unsigned long va; 2195 unsigned long curr_pc; 2196 2197 /* 2198 * Update PC and hold onto current PC in case there is 2199 * an error and we want to rollback the PC 2200 */ 2201 curr_pc = vcpu->arch.pc; 2202 er = update_pc(vcpu, cause); 2203 if (er == EMULATE_FAIL) 2204 return er; 2205 2206 base = inst.i_format.rs; 2207 op_inst = inst.i_format.rt; 2208 if (cpu_has_mips_r6) 2209 offset = inst.spec3_format.simmediate; 2210 else 2211 offset = inst.i_format.simmediate; 2212 cache = op_inst & CacheOp_Cache; 2213 op = op_inst & CacheOp_Op; 2214 2215 va = arch->gprs[base] + offset; 2216 2217 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 2218 cache, op, base, arch->gprs[base], offset); 2219 2220 /* 2221 * Treat INDEX_INV as a nop, basically issued by Linux on startup to 2222 * invalidate the caches entirely by stepping through all the 2223 * ways/indexes 2224 */ 2225 if (op == Index_Writeback_Inv) { 2226 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 2227 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, 2228 arch->gprs[base], offset); 2229 2230 if (cache == Cache_D) { 2231 #ifdef CONFIG_CPU_R4K_CACHE_TLB 2232 r4k_blast_dcache(); 2233 #else 2234 switch (boot_cpu_type()) { 2235 case CPU_CAVIUM_OCTEON3: 2236 /* locally flush icache */ 2237 local_flush_icache_range(0, 0); 2238 break; 2239 default: 2240 __flush_cache_all(); 2241 break; 2242 } 2243 #endif 2244 } else if (cache == Cache_I) { 2245 #ifdef CONFIG_CPU_R4K_CACHE_TLB 2246 r4k_blast_icache(); 2247 #else 2248 switch (boot_cpu_type()) { 2249 case CPU_CAVIUM_OCTEON3: 2250 /* locally flush icache */ 2251 local_flush_icache_range(0, 0); 2252 break; 2253 default: 2254 flush_icache_all(); 2255 break; 2256 } 2257 #endif 2258 } else { 2259 kvm_err("%s: unsupported CACHE INDEX operation\n", 2260 __func__); 2261 return EMULATE_FAIL; 2262 } 2263 2264 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 2265 kvm_mips_trans_cache_index(inst, opc, vcpu); 2266 #endif 2267 goto done; 2268 } 2269 2270 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ 2271 if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) { 2272 /* 2273 * Perform the dcache part of icache synchronisation on the 2274 * guest's behalf. 
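		 * Both Hit_Writeback_Inv_D and Hit_Invalidate_D are emulated
		 * below with a protected writeback of the corresponding host
		 * dcache line, which is the conservative (data-preserving)
		 * choice for either op.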
2275 */ 2276 er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, 2277 curr_pc, va, run, vcpu, cause); 2278 if (er != EMULATE_DONE) 2279 goto done; 2280 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 2281 /* 2282 * Replace the CACHE instruction, with a SYNCI, not the same, 2283 * but avoids a trap 2284 */ 2285 kvm_mips_trans_cache_va(inst, opc, vcpu); 2286 #endif 2287 } else if (op_inst == Hit_Invalidate_I) { 2288 /* Perform the icache synchronisation on the guest's behalf */ 2289 er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, 2290 curr_pc, va, run, vcpu, cause); 2291 if (er != EMULATE_DONE) 2292 goto done; 2293 er = kvm_mips_guest_cache_op(protected_flush_icache_line, 2294 curr_pc, va, run, vcpu, cause); 2295 if (er != EMULATE_DONE) 2296 goto done; 2297 2298 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 2299 /* Replace the CACHE instruction, with a SYNCI */ 2300 kvm_mips_trans_cache_va(inst, opc, vcpu); 2301 #endif 2302 } else { 2303 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 2304 cache, op, base, arch->gprs[base], offset); 2305 er = EMULATE_FAIL; 2306 } 2307 2308 done: 2309 /* Rollback PC only if emulation was unsuccessful */ 2310 if (er == EMULATE_FAIL) 2311 vcpu->arch.pc = curr_pc; 2312 /* Guest exception needs guest to resume */ 2313 if (er == EMULATE_EXCEPT) 2314 er = EMULATE_DONE; 2315 2316 return er; 2317 } 2318 2319 enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, 2320 struct kvm_run *run, 2321 struct kvm_vcpu *vcpu) 2322 { 2323 union mips_instruction inst; 2324 enum emulation_result er = EMULATE_DONE; 2325 int err; 2326 2327 /* Fetch the instruction. */ 2328 if (cause & CAUSEF_BD) 2329 opc += 1; 2330 err = kvm_get_badinstr(opc, vcpu, &inst.word); 2331 if (err) 2332 return EMULATE_FAIL; 2333 2334 switch (inst.r_format.opcode) { 2335 case cop0_op: 2336 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); 2337 break; 2338 2339 #ifndef CONFIG_CPU_MIPSR6 2340 case cache_op: 2341 ++vcpu->stat.cache_exits; 2342 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); 2343 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); 2344 break; 2345 #else 2346 case spec3_op: 2347 switch (inst.spec3_format.func) { 2348 case cache6_op: 2349 ++vcpu->stat.cache_exits; 2350 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); 2351 er = kvm_mips_emulate_cache(inst, opc, cause, run, 2352 vcpu); 2353 break; 2354 default: 2355 goto unknown; 2356 } 2357 break; 2358 unknown: 2359 #endif 2360 2361 default: 2362 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, 2363 inst.word); 2364 kvm_arch_vcpu_dump_regs(vcpu); 2365 er = EMULATE_FAIL; 2366 break; 2367 } 2368 2369 return er; 2370 } 2371 #endif /* CONFIG_KVM_MIPS_VZ */ 2372 2373 /** 2374 * kvm_mips_guest_exception_base() - Find guest exception vector base address. 2375 * 2376 * Returns: The base address of the current guest exception vector, taking 2377 * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account. 
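 *
 * For example, with Guest.CP0_Status.BEV clear and Guest.CP0_EBase at its
 * architectural reset value of 0x80000000, the general exception handler
 * used by most callers below ends up at 0x80000000 + 0x180.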
2378 */ 2379 long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu) 2380 { 2381 struct mips_coproc *cop0 = vcpu->arch.cop0; 2382 2383 if (kvm_read_c0_guest_status(cop0) & ST0_BEV) 2384 return KVM_GUEST_CKSEG1ADDR(0x1fc00200); 2385 else 2386 return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE; 2387 } 2388 2389 enum emulation_result kvm_mips_emulate_syscall(u32 cause, 2390 u32 *opc, 2391 struct kvm_run *run, 2392 struct kvm_vcpu *vcpu) 2393 { 2394 struct mips_coproc *cop0 = vcpu->arch.cop0; 2395 struct kvm_vcpu_arch *arch = &vcpu->arch; 2396 enum emulation_result er = EMULATE_DONE; 2397 2398 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2399 /* save old pc */ 2400 kvm_write_c0_guest_epc(cop0, arch->pc); 2401 kvm_set_c0_guest_status(cop0, ST0_EXL); 2402 2403 if (cause & CAUSEF_BD) 2404 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2405 else 2406 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2407 2408 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); 2409 2410 kvm_change_c0_guest_cause(cop0, (0xff), 2411 (EXCCODE_SYS << CAUSEB_EXCCODE)); 2412 2413 /* Set PC to the exception entry point */ 2414 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2415 2416 } else { 2417 kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); 2418 er = EMULATE_FAIL; 2419 } 2420 2421 return er; 2422 } 2423 2424 enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, 2425 u32 *opc, 2426 struct kvm_run *run, 2427 struct kvm_vcpu *vcpu) 2428 { 2429 struct mips_coproc *cop0 = vcpu->arch.cop0; 2430 struct kvm_vcpu_arch *arch = &vcpu->arch; 2431 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | 2432 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 2433 2434 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2435 /* save old pc */ 2436 kvm_write_c0_guest_epc(cop0, arch->pc); 2437 kvm_set_c0_guest_status(cop0, ST0_EXL); 2438 2439 if (cause & CAUSEF_BD) 2440 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2441 else 2442 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2443 2444 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", 2445 arch->pc); 2446 2447 /* set pc to the exception entry point */ 2448 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; 2449 2450 } else { 2451 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", 2452 arch->pc); 2453 2454 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2455 } 2456 2457 kvm_change_c0_guest_cause(cop0, (0xff), 2458 (EXCCODE_TLBL << CAUSEB_EXCCODE)); 2459 2460 /* setup badvaddr, context and entryhi registers for the guest */ 2461 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2462 /* XXXKYMA: is the context register used by linux??? 
*/ 2463 kvm_write_c0_guest_entryhi(cop0, entryhi); 2464 2465 return EMULATE_DONE; 2466 } 2467 2468 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, 2469 u32 *opc, 2470 struct kvm_run *run, 2471 struct kvm_vcpu *vcpu) 2472 { 2473 struct mips_coproc *cop0 = vcpu->arch.cop0; 2474 struct kvm_vcpu_arch *arch = &vcpu->arch; 2475 unsigned long entryhi = 2476 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 2477 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 2478 2479 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2480 /* save old pc */ 2481 kvm_write_c0_guest_epc(cop0, arch->pc); 2482 kvm_set_c0_guest_status(cop0, ST0_EXL); 2483 2484 if (cause & CAUSEF_BD) 2485 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2486 else 2487 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2488 2489 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", 2490 arch->pc); 2491 } else { 2492 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", 2493 arch->pc); 2494 } 2495 2496 /* set pc to the exception entry point */ 2497 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2498 2499 kvm_change_c0_guest_cause(cop0, (0xff), 2500 (EXCCODE_TLBL << CAUSEB_EXCCODE)); 2501 2502 /* setup badvaddr, context and entryhi registers for the guest */ 2503 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2504 /* XXXKYMA: is the context register used by linux??? */ 2505 kvm_write_c0_guest_entryhi(cop0, entryhi); 2506 2507 return EMULATE_DONE; 2508 } 2509 2510 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, 2511 u32 *opc, 2512 struct kvm_run *run, 2513 struct kvm_vcpu *vcpu) 2514 { 2515 struct mips_coproc *cop0 = vcpu->arch.cop0; 2516 struct kvm_vcpu_arch *arch = &vcpu->arch; 2517 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 2518 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 2519 2520 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2521 /* save old pc */ 2522 kvm_write_c0_guest_epc(cop0, arch->pc); 2523 kvm_set_c0_guest_status(cop0, ST0_EXL); 2524 2525 if (cause & CAUSEF_BD) 2526 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2527 else 2528 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2529 2530 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", 2531 arch->pc); 2532 2533 /* Set PC to the exception entry point */ 2534 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; 2535 } else { 2536 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", 2537 arch->pc); 2538 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2539 } 2540 2541 kvm_change_c0_guest_cause(cop0, (0xff), 2542 (EXCCODE_TLBS << CAUSEB_EXCCODE)); 2543 2544 /* setup badvaddr, context and entryhi registers for the guest */ 2545 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2546 /* XXXKYMA: is the context register used by linux??? 
*/ 2547 kvm_write_c0_guest_entryhi(cop0, entryhi); 2548 2549 return EMULATE_DONE; 2550 } 2551 2552 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, 2553 u32 *opc, 2554 struct kvm_run *run, 2555 struct kvm_vcpu *vcpu) 2556 { 2557 struct mips_coproc *cop0 = vcpu->arch.cop0; 2558 struct kvm_vcpu_arch *arch = &vcpu->arch; 2559 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 2560 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 2561 2562 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2563 /* save old pc */ 2564 kvm_write_c0_guest_epc(cop0, arch->pc); 2565 kvm_set_c0_guest_status(cop0, ST0_EXL); 2566 2567 if (cause & CAUSEF_BD) 2568 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2569 else 2570 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2571 2572 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", 2573 arch->pc); 2574 } else { 2575 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", 2576 arch->pc); 2577 } 2578 2579 /* Set PC to the exception entry point */ 2580 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2581 2582 kvm_change_c0_guest_cause(cop0, (0xff), 2583 (EXCCODE_TLBS << CAUSEB_EXCCODE)); 2584 2585 /* setup badvaddr, context and entryhi registers for the guest */ 2586 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2587 /* XXXKYMA: is the context register used by linux??? */ 2588 kvm_write_c0_guest_entryhi(cop0, entryhi); 2589 2590 return EMULATE_DONE; 2591 } 2592 2593 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, 2594 u32 *opc, 2595 struct kvm_run *run, 2596 struct kvm_vcpu *vcpu) 2597 { 2598 struct mips_coproc *cop0 = vcpu->arch.cop0; 2599 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 2600 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 2601 struct kvm_vcpu_arch *arch = &vcpu->arch; 2602 2603 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2604 /* save old pc */ 2605 kvm_write_c0_guest_epc(cop0, arch->pc); 2606 kvm_set_c0_guest_status(cop0, ST0_EXL); 2607 2608 if (cause & CAUSEF_BD) 2609 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2610 else 2611 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2612 2613 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", 2614 arch->pc); 2615 } else { 2616 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", 2617 arch->pc); 2618 } 2619 2620 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2621 2622 kvm_change_c0_guest_cause(cop0, (0xff), 2623 (EXCCODE_MOD << CAUSEB_EXCCODE)); 2624 2625 /* setup badvaddr, context and entryhi registers for the guest */ 2626 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2627 /* XXXKYMA: is the context register used by linux??? 
*/ 2628 kvm_write_c0_guest_entryhi(cop0, entryhi); 2629 2630 return EMULATE_DONE; 2631 } 2632 2633 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, 2634 u32 *opc, 2635 struct kvm_run *run, 2636 struct kvm_vcpu *vcpu) 2637 { 2638 struct mips_coproc *cop0 = vcpu->arch.cop0; 2639 struct kvm_vcpu_arch *arch = &vcpu->arch; 2640 2641 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2642 /* save old pc */ 2643 kvm_write_c0_guest_epc(cop0, arch->pc); 2644 kvm_set_c0_guest_status(cop0, ST0_EXL); 2645 2646 if (cause & CAUSEF_BD) 2647 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2648 else 2649 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2650 2651 } 2652 2653 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2654 2655 kvm_change_c0_guest_cause(cop0, (0xff), 2656 (EXCCODE_CPU << CAUSEB_EXCCODE)); 2657 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); 2658 2659 return EMULATE_DONE; 2660 } 2661 2662 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, 2663 u32 *opc, 2664 struct kvm_run *run, 2665 struct kvm_vcpu *vcpu) 2666 { 2667 struct mips_coproc *cop0 = vcpu->arch.cop0; 2668 struct kvm_vcpu_arch *arch = &vcpu->arch; 2669 enum emulation_result er = EMULATE_DONE; 2670 2671 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2672 /* save old pc */ 2673 kvm_write_c0_guest_epc(cop0, arch->pc); 2674 kvm_set_c0_guest_status(cop0, ST0_EXL); 2675 2676 if (cause & CAUSEF_BD) 2677 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2678 else 2679 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2680 2681 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); 2682 2683 kvm_change_c0_guest_cause(cop0, (0xff), 2684 (EXCCODE_RI << CAUSEB_EXCCODE)); 2685 2686 /* Set PC to the exception entry point */ 2687 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2688 2689 } else { 2690 kvm_err("Trying to deliver RI when EXL is already set\n"); 2691 er = EMULATE_FAIL; 2692 } 2693 2694 return er; 2695 } 2696 2697 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, 2698 u32 *opc, 2699 struct kvm_run *run, 2700 struct kvm_vcpu *vcpu) 2701 { 2702 struct mips_coproc *cop0 = vcpu->arch.cop0; 2703 struct kvm_vcpu_arch *arch = &vcpu->arch; 2704 enum emulation_result er = EMULATE_DONE; 2705 2706 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2707 /* save old pc */ 2708 kvm_write_c0_guest_epc(cop0, arch->pc); 2709 kvm_set_c0_guest_status(cop0, ST0_EXL); 2710 2711 if (cause & CAUSEF_BD) 2712 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2713 else 2714 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2715 2716 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); 2717 2718 kvm_change_c0_guest_cause(cop0, (0xff), 2719 (EXCCODE_BP << CAUSEB_EXCCODE)); 2720 2721 /* Set PC to the exception entry point */ 2722 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2723 2724 } else { 2725 kvm_err("Trying to deliver BP when EXL is already set\n"); 2726 er = EMULATE_FAIL; 2727 } 2728 2729 return er; 2730 } 2731 2732 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, 2733 u32 *opc, 2734 struct kvm_run *run, 2735 struct kvm_vcpu *vcpu) 2736 { 2737 struct mips_coproc *cop0 = vcpu->arch.cop0; 2738 struct kvm_vcpu_arch *arch = &vcpu->arch; 2739 enum emulation_result er = EMULATE_DONE; 2740 2741 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2742 /* save old pc */ 2743 kvm_write_c0_guest_epc(cop0, arch->pc); 2744 kvm_set_c0_guest_status(cop0, ST0_EXL); 2745 2746 if (cause & CAUSEF_BD) 2747 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2748 else 2749 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2750 2751 
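		/*
		 * As with the other exception injectors above: record the
		 * exception code (Tr here) in guest Cause and vector to the
		 * guest's general exception entry at base + 0x180.
		 */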
kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); 2752 2753 kvm_change_c0_guest_cause(cop0, (0xff), 2754 (EXCCODE_TR << CAUSEB_EXCCODE)); 2755 2756 /* Set PC to the exception entry point */ 2757 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2758 2759 } else { 2760 kvm_err("Trying to deliver TRAP when EXL is already set\n"); 2761 er = EMULATE_FAIL; 2762 } 2763 2764 return er; 2765 } 2766 2767 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, 2768 u32 *opc, 2769 struct kvm_run *run, 2770 struct kvm_vcpu *vcpu) 2771 { 2772 struct mips_coproc *cop0 = vcpu->arch.cop0; 2773 struct kvm_vcpu_arch *arch = &vcpu->arch; 2774 enum emulation_result er = EMULATE_DONE; 2775 2776 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2777 /* save old pc */ 2778 kvm_write_c0_guest_epc(cop0, arch->pc); 2779 kvm_set_c0_guest_status(cop0, ST0_EXL); 2780 2781 if (cause & CAUSEF_BD) 2782 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2783 else 2784 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2785 2786 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); 2787 2788 kvm_change_c0_guest_cause(cop0, (0xff), 2789 (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); 2790 2791 /* Set PC to the exception entry point */ 2792 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2793 2794 } else { 2795 kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); 2796 er = EMULATE_FAIL; 2797 } 2798 2799 return er; 2800 } 2801 2802 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, 2803 u32 *opc, 2804 struct kvm_run *run, 2805 struct kvm_vcpu *vcpu) 2806 { 2807 struct mips_coproc *cop0 = vcpu->arch.cop0; 2808 struct kvm_vcpu_arch *arch = &vcpu->arch; 2809 enum emulation_result er = EMULATE_DONE; 2810 2811 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2812 /* save old pc */ 2813 kvm_write_c0_guest_epc(cop0, arch->pc); 2814 kvm_set_c0_guest_status(cop0, ST0_EXL); 2815 2816 if (cause & CAUSEF_BD) 2817 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2818 else 2819 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2820 2821 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); 2822 2823 kvm_change_c0_guest_cause(cop0, (0xff), 2824 (EXCCODE_FPE << CAUSEB_EXCCODE)); 2825 2826 /* Set PC to the exception entry point */ 2827 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2828 2829 } else { 2830 kvm_err("Trying to deliver FPE when EXL is already set\n"); 2831 er = EMULATE_FAIL; 2832 } 2833 2834 return er; 2835 } 2836 2837 enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, 2838 u32 *opc, 2839 struct kvm_run *run, 2840 struct kvm_vcpu *vcpu) 2841 { 2842 struct mips_coproc *cop0 = vcpu->arch.cop0; 2843 struct kvm_vcpu_arch *arch = &vcpu->arch; 2844 enum emulation_result er = EMULATE_DONE; 2845 2846 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2847 /* save old pc */ 2848 kvm_write_c0_guest_epc(cop0, arch->pc); 2849 kvm_set_c0_guest_status(cop0, ST0_EXL); 2850 2851 if (cause & CAUSEF_BD) 2852 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2853 else 2854 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2855 2856 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); 2857 2858 kvm_change_c0_guest_cause(cop0, (0xff), 2859 (EXCCODE_MSADIS << CAUSEB_EXCCODE)); 2860 2861 /* Set PC to the exception entry point */ 2862 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2863 2864 } else { 2865 kvm_err("Trying to deliver MSADIS when EXL is already set\n"); 2866 er = EMULATE_FAIL; 2867 } 2868 2869 return er; 2870 } 2871 2872 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, 2873 struct kvm_run *run, 2874 struct 
kvm_vcpu *vcpu) 2875 { 2876 struct mips_coproc *cop0 = vcpu->arch.cop0; 2877 struct kvm_vcpu_arch *arch = &vcpu->arch; 2878 enum emulation_result er = EMULATE_DONE; 2879 unsigned long curr_pc; 2880 union mips_instruction inst; 2881 int err; 2882 2883 /* 2884 * Update PC and hold onto current PC in case there is 2885 * an error and we want to rollback the PC 2886 */ 2887 curr_pc = vcpu->arch.pc; 2888 er = update_pc(vcpu, cause); 2889 if (er == EMULATE_FAIL) 2890 return er; 2891 2892 /* Fetch the instruction. */ 2893 if (cause & CAUSEF_BD) 2894 opc += 1; 2895 err = kvm_get_badinstr(opc, vcpu, &inst.word); 2896 if (err) { 2897 kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err); 2898 return EMULATE_FAIL; 2899 } 2900 2901 if (inst.r_format.opcode == spec3_op && 2902 inst.r_format.func == rdhwr_op && 2903 inst.r_format.rs == 0 && 2904 (inst.r_format.re >> 3) == 0) { 2905 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); 2906 int rd = inst.r_format.rd; 2907 int rt = inst.r_format.rt; 2908 int sel = inst.r_format.re & 0x7; 2909 2910 /* If usermode, check RDHWR rd is allowed by guest HWREna */ 2911 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { 2912 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", 2913 rd, opc); 2914 goto emulate_ri; 2915 } 2916 switch (rd) { 2917 case MIPS_HWR_CPUNUM: /* CPU number */ 2918 arch->gprs[rt] = vcpu->vcpu_id; 2919 break; 2920 case MIPS_HWR_SYNCISTEP: /* SYNCI length */ 2921 arch->gprs[rt] = min(current_cpu_data.dcache.linesz, 2922 current_cpu_data.icache.linesz); 2923 break; 2924 case MIPS_HWR_CC: /* Read count register */ 2925 arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu); 2926 break; 2927 case MIPS_HWR_CCRES: /* Count register resolution */ 2928 switch (current_cpu_data.cputype) { 2929 case CPU_20KC: 2930 case CPU_25KF: 2931 arch->gprs[rt] = 1; 2932 break; 2933 default: 2934 arch->gprs[rt] = 2; 2935 } 2936 break; 2937 case MIPS_HWR_ULR: /* Read UserLocal register */ 2938 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); 2939 break; 2940 2941 default: 2942 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); 2943 goto emulate_ri; 2944 } 2945 2946 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel), 2947 vcpu->arch.gprs[rt]); 2948 } else { 2949 kvm_debug("Emulate RI not supported @ %p: %#x\n", 2950 opc, inst.word); 2951 goto emulate_ri; 2952 } 2953 2954 return EMULATE_DONE; 2955 2956 emulate_ri: 2957 /* 2958 * Rollback PC (if in branch delay slot then the PC already points to 2959 * branch target), and pass the RI exception to the guest OS. 
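	 * The guest's handler will then see EPC pointing at that instruction
	 * (or at the branch, with Cause.BD set, if it was in a delay slot).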
2960 */ 2961 vcpu->arch.pc = curr_pc; 2962 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); 2963 } 2964 2965 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, 2966 struct kvm_run *run) 2967 { 2968 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; 2969 enum emulation_result er = EMULATE_DONE; 2970 2971 if (run->mmio.len > sizeof(*gpr)) { 2972 kvm_err("Bad MMIO length: %d", run->mmio.len); 2973 er = EMULATE_FAIL; 2974 goto done; 2975 } 2976 2977 /* Restore saved resume PC */ 2978 vcpu->arch.pc = vcpu->arch.io_pc; 2979 2980 switch (run->mmio.len) { 2981 case 8: 2982 switch (vcpu->mmio_needed) { 2983 case 11: 2984 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) | 2985 (((*(s64 *)run->mmio.data) & 0xff) << 56); 2986 break; 2987 case 12: 2988 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) | 2989 (((*(s64 *)run->mmio.data) & 0xffff) << 48); 2990 break; 2991 case 13: 2992 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) | 2993 (((*(s64 *)run->mmio.data) & 0xffffff) << 40); 2994 break; 2995 case 14: 2996 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) | 2997 (((*(s64 *)run->mmio.data) & 0xffffffff) << 32); 2998 break; 2999 case 15: 3000 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | 3001 (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24); 3002 break; 3003 case 16: 3004 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | 3005 (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16); 3006 break; 3007 case 17: 3008 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | 3009 (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8); 3010 break; 3011 case 18: 3012 case 19: 3013 *gpr = *(s64 *)run->mmio.data; 3014 break; 3015 case 20: 3016 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) | 3017 ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff); 3018 break; 3019 case 21: 3020 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) | 3021 ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff); 3022 break; 3023 case 22: 3024 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) | 3025 ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff); 3026 break; 3027 case 23: 3028 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) | 3029 ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff); 3030 break; 3031 case 24: 3032 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) | 3033 ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff); 3034 break; 3035 case 25: 3036 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) | 3037 ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff); 3038 break; 3039 case 26: 3040 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) | 3041 ((((*(s64 *)run->mmio.data)) >> 56) & 0xff); 3042 break; 3043 default: 3044 *gpr = *(s64 *)run->mmio.data; 3045 } 3046 break; 3047 3048 case 4: 3049 switch (vcpu->mmio_needed) { 3050 case 1: 3051 *gpr = *(u32 *)run->mmio.data; 3052 break; 3053 case 2: 3054 *gpr = *(s32 *)run->mmio.data; 3055 break; 3056 case 3: 3057 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | 3058 (((*(s32 *)run->mmio.data) & 0xff) << 24); 3059 break; 3060 case 4: 3061 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | 3062 (((*(s32 *)run->mmio.data) & 0xffff) << 16); 3063 break; 3064 case 5: 3065 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | 3066 (((*(s32 *)run->mmio.data) & 0xffffff) << 8); 3067 break; 3068 case 6: 3069 case 7: 3070 *gpr = *(s32 *)run->mmio.data; 3071 break; 3072 case 8: 3073 *gpr = 
(vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
				((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
			break;
		case 9:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
				((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
			break;
		case 10:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
				((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
			break;
		default:
			*gpr = *(s32 *)run->mmio.data;
		}
		break;

	case 2:
		if (vcpu->mmio_needed == 1)
			*gpr = *(u16 *)run->mmio.data;
		else
			*gpr = *(s16 *)run->mmio.data;

		break;
	case 1:
		if (vcpu->mmio_needed == 1)
			*gpr = *(u8 *)run->mmio.data;
		else
			*gpr = *(s8 *)run->mmio.data;
		break;
	}

done:
	return er;
}

static enum emulation_result kvm_mips_emulate_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case EXCCODE_INT:
		case EXCCODE_SYS:
		case EXCCODE_BP:
		case EXCCODE_RI:
		case EXCCODE_TR:
		case EXCCODE_MSAFPE:
		case EXCCODE_FPE:
		case EXCCODE_MSADIS:
			break;

		case EXCCODE_CPU:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case EXCCODE_MOD:
			break;

		case EXCCODE_TLBL:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_TLBS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
break; 3204 3205 case EXCCODE_ADES: 3206 kvm_debug("%s: address error ST @ %#lx\n", __func__, 3207 badvaddr); 3208 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 3209 cause &= ~0xff; 3210 cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE); 3211 } 3212 er = EMULATE_PRIV_FAIL; 3213 break; 3214 case EXCCODE_ADEL: 3215 kvm_debug("%s: address error LD @ %#lx\n", __func__, 3216 badvaddr); 3217 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 3218 cause &= ~0xff; 3219 cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE); 3220 } 3221 er = EMULATE_PRIV_FAIL; 3222 break; 3223 default: 3224 er = EMULATE_PRIV_FAIL; 3225 break; 3226 } 3227 } 3228 3229 if (er == EMULATE_PRIV_FAIL) 3230 kvm_mips_emulate_exc(cause, opc, run, vcpu); 3231 3232 return er; 3233 } 3234 3235 /* 3236 * User Address (UA) fault, this could happen if 3237 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this 3238 * case we pass on the fault to the guest kernel and let it handle it. 3239 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this 3240 * case we inject the TLB from the Guest TLB into the shadow host TLB 3241 */ 3242 enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, 3243 u32 *opc, 3244 struct kvm_run *run, 3245 struct kvm_vcpu *vcpu, 3246 bool write_fault) 3247 { 3248 enum emulation_result er = EMULATE_DONE; 3249 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 3250 unsigned long va = vcpu->arch.host_cp0_badvaddr; 3251 int index; 3252 3253 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n", 3254 vcpu->arch.host_cp0_badvaddr); 3255 3256 /* 3257 * KVM would not have got the exception if this entry was valid in the 3258 * shadow host TLB. Check the Guest TLB, if the entry is not there then 3259 * send the guest an exception. The guest exc handler should then inject 3260 * an entry into the guest TLB. 3261 */ 3262 index = kvm_mips_guest_tlb_lookup(vcpu, 3263 (va & VPN2_MASK) | 3264 (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & 3265 KVM_ENTRYHI_ASID)); 3266 if (index < 0) { 3267 if (exccode == EXCCODE_TLBL) { 3268 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); 3269 } else if (exccode == EXCCODE_TLBS) { 3270 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); 3271 } else { 3272 kvm_err("%s: invalid exc code: %d\n", __func__, 3273 exccode); 3274 er = EMULATE_FAIL; 3275 } 3276 } else { 3277 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; 3278 3279 /* 3280 * Check if the entry is valid, if not then setup a TLB invalid 3281 * exception to the guest 3282 */ 3283 if (!TLB_IS_VALID(*tlb, va)) { 3284 if (exccode == EXCCODE_TLBL) { 3285 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, 3286 vcpu); 3287 } else if (exccode == EXCCODE_TLBS) { 3288 er = kvm_mips_emulate_tlbinv_st(cause, opc, run, 3289 vcpu); 3290 } else { 3291 kvm_err("%s: invalid exc code: %d\n", __func__, 3292 exccode); 3293 er = EMULATE_FAIL; 3294 } 3295 } else { 3296 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", 3297 tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]); 3298 /* 3299 * OK we have a Guest TLB entry, now inject it into the 3300 * shadow host TLB 3301 */ 3302 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va, 3303 write_fault)) { 3304 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", 3305 __func__, va, index, vcpu, 3306 read_c0_entryhi()); 3307 er = EMULATE_FAIL; 3308 } 3309 } 3310 } 3311 3312 return er; 3313 } 3314
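/*
 * Summary of the flow above for a faulting guest virtual address:
 *
 *   no matching guest TLB entry          -> deliver a guest TLB refill
 *                                           exception (TLBL or TLBS)
 *   matching entry, V bit clear for VA   -> deliver a guest TLB invalid
 *                                           exception (TLBL or TLBS)
 *   matching valid entry                 -> inject the translation into the
 *                                           shadow host TLB so the guest can
 *                                           retry the access
 */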