/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

#define ULONG_SIZE		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

#define XICS_XIRR		4
#define XICS_QIRR		0xc

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* We got here with an IPI; clear it */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r0, 0xff
	li	r6, XICS_QIRR
	li	r7, XICS_XIRR
	lwzcix	r8, r5, r7		/* ack the interrupt */
	sync
	stbcix	r0, r5, r6		/* clear it */
	stwcix	r8, r5, r7		/* EOI it */

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
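	/* Guest DEC value on entry = saved expiry (dec_expires) minus the
	 * current timebase, i.e. however much guest decrementer time is left. */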
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Set partition DABR */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
	 * XXX maybe only need this on primary thread?
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	rldimi	r6,r5,0,62		/* XXX map as if threads 1:1 p:v */
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
	li	r6,128
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
	lwz	r8,PACA_LOCK_TOKEN(r13)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */

	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	li	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
	beq	cr1,11f
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)

	ld	r4, VCPU_GPR(r4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is something we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Check for mediated interrupts (could be done earlier really ...) */
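	/*
	 * If we exited with an external interrupt while the guest had MSR_EE
	 * set and LPCR_MER was set (i.e. we had requested a mediated external
	 * interrupt on the guest's behalf), bounce the interrupt straight
	 * back into the guest rather than exiting to the host.
	 */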
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	andi.	r0,r11,MSR_EE
	beq	1f
	mfspr	r5,SPRN_LPCR
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

hcall_real_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,-1
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* Save more register state */
	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8

	stw	r5, VCPU_XER(r9)
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	std	r8, VCPU_CTR(r9)
	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
7:	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	lwsync

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_QIRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz	r8,PACA_LOCK_TOKEN(r13)
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)	/* save guest DSCR in the vcpu struct */
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r3)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* RFI into the highmem handler, or branch to interrupt handler */
12:	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:
BEGIN_FTR_SECTION
	b	12b
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

6:	mfspr	r6,SPRN_HDAR
	mfspr	r7,SPRN_HDSISR
	b	7b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
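/*
 * hcall numbers are multiples of 4, so the guest's hcall number is used
 * directly as a byte offset into hcall_real_table; each 32-bit entry holds
 * the offset of its handler from the start of the table, or 0 if that hcall
 * is not handled in real mode.
 */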
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(r3)(r9)
	andi.	r0,r11,MSR_PR
	bne	hcall_real_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	hcall_real_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	hcall_real_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(r4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(r3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	hcall_real_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	mtspr	SPRN_DABR,r4
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	1f
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(r3)(r3)
BEGIN_FTR_SECTION
	b	2f		/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	popcntw	r7,r4
	cmpw	r7,r8
	bge	2f
	stwcx.	r4,0,r6
	bne	31b
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwsync
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r3)
	std	r15, VCPU_GPR(r15)(r3)
	std	r16, VCPU_GPR(r16)(r3)
	std	r17, VCPU_GPR(r17)(r3)
	std	r18, VCPU_GPR(r18)(r3)
	std	r19, VCPU_GPR(r19)(r3)
	std	r20, VCPU_GPR(r20)(r3)
	std	r21, VCPU_GPR(r21)(r3)
	std	r22, VCPU_GPR(r22)(r3)
	std	r23, VCPU_GPR(r23)(r3)
	std	r24, VCPU_GPR(r24)(r3)
	std	r25, VCPU_GPR(r25)(r3)
	std	r26, VCPU_GPR(r26)(r3)
	std	r27, VCPU_GPR(r27)(r3)
	std	r28, VCPU_GPR(r28)(r3)
	std	r29, VCPU_GPR(r29)(r3)
	std	r30, VCPU_GPR(r30)(r3)
	std	r31, VCPU_GPR(r31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,0x80
	stb	r0,PACAPROCSTART(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

kvm_end_cede:
	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* If we're a secondary thread and we got here by an IPI, ack it */
	ld	r4,HSTATE_KVM_VCPU(r13)
	lwz	r3,VCPU_PTID(r4)
	cmpwi	r3,0
	beq	27f
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7	/* extract wake reason field */
	cmpwi	r3,4		/* was it an external interrupt? */
	bne	27f
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r0,0xff
	li	r6,XICS_QIRR
	li	r7,XICS_XIRR
	lwzcix	r8,r5,r7	/* ack the interrupt */
	sync
	stbcix	r0,r5,r6	/* clear it */
	stwcix	r8,r5,r7	/* EOI it */
27:
	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
1:	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
2:	li	r3,H_TOO_HARD
	blr

secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr

secondary_nap:
	/* Clear any pending IPI - assume we're a secondary thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_QIRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,r6,r3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r9
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,r7,r4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr