/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	ld	r6, HSTATE_MMCR + 24(r13)
	ld	r7, HSTATE_MMCR + 32(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR + 40(r13)
	ld	r9, HSTATE_MMCR + 48(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
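	/*
	 * Added note: the lwarx/stwcx. sequence below is the usual
	 * load-reserve/store-conditional idiom for an atomic OR; if another
	 * thread modifies vcore->napping_threads between the lwarx and the
	 * stwcx., the store fails and we retry.
	 */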
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, 1
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	bne	kvmppc_got_guest

kvm_novcpu_exit:
	b	hdec_soon

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r1, SPRN_CTRLF
	ori	r1, r1, 1
	mtspr	SPRN_CTRLT, r1

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
	lwsync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
	/* Clear the runlatch bit before napping */
	mfspr	r2, SPRN_CTRLF
	clrrdi	r2, r2, 1
	mtspr	SPRN_CTRLT, r2

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
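	/*
	 * Added note: ldarx/stdcx. below atomically clears this CPU's bit in
	 * kvm->arch.need_tlb_flush before we flush the TLB, so that a
	 * concurrent update to another bit in the same doubleword is not lost.
	 */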
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
31:
	/* Do we have a guest vcpu to run? */
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

BEGIN_FTR_SECTION
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
 BEGIN_FTR_SECTION_NESTED(89)
	isync
 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */

	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might have been not set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
skip_tm:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 or PPC970 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.
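/*
 * Added note: the hrfid above drops into the guest with the PC taken from
 * HSRR0 and the MSR from HSRR1; the next guest exit comes back into this
 * file, via the first-level interrupt handlers, at kvmppc_interrupt_hv below.
 */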

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
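	/*
	 * Added note (assumption): interrupt vectors delivered via HSRR0/1
	 * arrive here with bit 1 set in the vector number in r12, which is
	 * what the andi. below tests; the clrrdi then strips that tag again.
	 */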
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	mr	r4, r9
	b	deliver_guest_interrupt

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
2:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	isync		/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	/* Order entry/exit update vs. IPIs */
	sync
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

secondary_too_late:
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
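	/*
	 * Added note: kvmppc_hpte_hv_fault (called below) returns 0 to retry
	 * the instruction, -1 to exit to the host kernel, -2 when MMIO
	 * emulation needs the instruction word, or a DSISR value that we
	 * reflect to the guest as a synthesized DSI.
	 */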

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR.  Also clear the
	 * runlatch bit before napping.
	 */
	mfspr	r2, SPRN_CTRLF
	clrrdi	r2, r2, 1
	mtspr	SPRN_CTRLT, r2

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	oris	r5,r5,LPCR_PECEDP@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRs */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback
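
/*
 * Note on the ordering above: kvmppc_h_cede stores ceded = 1, issues a
 * sync, and only then tests prodded, while the H_PROD path (handled
 * elsewhere) is expected to store prodded = 1 before it tests ceded.
 * With both sides doing store-then-barrier-then-load, at least one of
 * them must observe the other's store, so a prod cannot be lost while
 * the vcpu goes to nap.  Sketched in C (illustrative only, not the
 * exact kernel code):
 *
 *	cede:				prod:
 *		vcpu->ceded = 1;		vcpu->prodded = 1;
 *		full_barrier();			full_barrier();
 *		if (vcpu->prodded)		if (vcpu->ceded)
 *			don't nap;			wake the vcpu;
 */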

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* Did we handle the MCE? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
	 * a machine check interrupt (set HSRR0 to 0x200).  For handled
	 * (non-fatal) errors, just go back to guest execution at the current
	 * HSRR0 instead of exiting the guest.  This approach injects a
	 * machine check into the guest for fatal errors, causing the guest
	 * to crash.
	 *
	 * The old code returned to the host for unhandled errors, which
	 * caused the guest to hang with soft lockups and made it difficult
	 * to recover the guest instance.
	 */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_MSR(r9)
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
	li	r3, 1
	blr
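
/*
 * A simplified sketch of the XICS protocol the next routine relies on:
 * the XIRR register presented by the ICP packs the current processor
 * priority (CPPR) into its top byte and the pending interrupt source
 * number (XISR) into the low 24 bits.  Loading XIRR accepts the
 * highest-priority pending interrupt; storing the same value back is
 * the EOI.  Roughly:
 *
 *	xirr = load from ICP + XICS_XIRR;
 *	xisr = xirr & 0xffffff;
 *	if (xisr == 0)
 *		nothing is pending;
 *	else if (xisr == XICS_IPI)
 *		clear the IPI by writing 0xff to MFRR, then EOI by
 *		storing xirr back;
 *	else
 *		leave it for the host ICP driver;
 */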

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	/*
	 * Save XIRR for later.  Since we get it in reverse endian on LE
	 * systems, save it byte reversed and fetch it back in host endian.
	 */
	li	r3, HSTATE_SAVED_XIRR
	STWX_BE	r0, r3, r13
#ifdef __LITTLE_ENDIAN__
	lwz	r3, HSTATE_SAVED_XIRR(r13)
#else
	mr	r3, r0
#endif
	rlwinm.	r3, r3, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime.  If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host.  We saved a copy of XIRR in
	 * the PACA earlier; it will be picked up by the host ICP driver
	 */
	li	r3, 1
	b	1b

43:	/* We raced with the host, we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	li	r3, 1
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state..  */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
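
/*
 * The TS handling above amounts to the following C-style sketch (the
 * constants are assumed to match their kernel definitions: MSR_TS_S_LG
 * is the bit position of the two-bit TS field, where 1 means suspended
 * and 2 means transactional):
 *
 *	ts = (guest_msr >> MSR_TS_S_LG) & 3;
 *	if (ts == 2)
 *		ts = 1;		an interrupt taken while transactional
 *				is delivered with TM suspended
 *	new_msr = (vcpu->arch.intr_msr & ~(3UL << MSR_TS_S_LG))
 *			| (ts << MSR_TS_S_LG);
 */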

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr
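
/*
 * The PMC6 value written above is 0x7fffffff, one count short of setting
 * the counter's most-significant bit.  Assuming the usual PMU behaviour
 * (a counter going "negative" raises a performance monitor exception
 * when PMXE and the PMCjCE condition enable are set, and C56RUN keeps
 * PMC5/6 counting even with the run latch clear), the very next event
 * PMC6 counts rolls it over to 0x80000000 and delivers the interrupt
 * that writing PMAO alone could not generate.
 */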