/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	(PSSCR_EC | PSSCR_ESL) >> 16

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note that all registers, i.e. per-core, per-subcore or per-thread,
	 * are saved here since any thread in the core might wake up first
	 */
BEGIN_FTR_SECTION
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
	 * SDR1 here
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)
/*
 * On POWER9, there are idle states such as stop4, invoked via cpuidle,
 * that lose hypervisor resources. In such cases, we need to save
 * additional SPRs before entering those idle states so that they can
 * be restored to their older values on wakeup from the idle state.
 *
 * On POWER8, the only such deep idle state is winkle which is used
 * only in the context of CPU-Hotplug, where these additional SPRs are
 * reinitialized to a sane value. Hence there is no need to save/restore
 * these SPRs.
 */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

power9_save_additional_sprs:
	mfspr	r3, SPRN_PID
	mfspr	r4, SPRN_LDBAR
	std	r3, STOP_PID(r13)
	std	r4, STOP_LDBAR(r13)

	mfspr	r3, SPRN_FSCR
	mfspr	r4, SPRN_HFSCR
	std	r3, STOP_FSCR(r13)
	std	r4, STOP_HFSCR(r13)

	mfspr	r3, SPRN_MMCRA
	mfspr	r4, SPRN_MMCR0
	std	r3, STOP_MMCRA(r13)
	std	r4, _MMCR0(r1)

	mfspr	r3, SPRN_MMCR1
	mfspr	r4, SPRN_MMCR2
	std	r3, STOP_MMCR1(r13)
	std	r4, STOP_MMCR2(r13)
	blr

power9_restore_additional_sprs:
	ld	r3,_LPCR(r1)
	ld	r4, STOP_PID(r13)
	mtspr	SPRN_LPCR,r3
	mtspr	SPRN_PID, r4

	ld	r3, STOP_LDBAR(r13)
	ld	r4, STOP_FSCR(r13)
	mtspr	SPRN_LDBAR, r3
	mtspr	SPRN_FSCR, r4

	ld	r3, STOP_HFSCR(r13)
	ld	r4, STOP_MMCRA(r13)
	mtspr	SPRN_HFSCR, r3
	mtspr	SPRN_MMCRA, r4

	ld	r3, _MMCR0(r1)
	ld	r4, STOP_MMCR1(r13)
	mtspr	SPRN_MMCR0, r3
	mtspr	SPRN_MMCR1, r4

	ld	r3, STOP_MMCR2(r13)
	mtspr	SPRN_MMCR2, r3
	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested PSSCR value in POWER9
 *
 * Address of idle handler to branch to in realmode in r4
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4

	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)

BEGIN_FTR_SECTION
	/*
	 * POWER9 does not require real mode to stop, and presently does not
	 * set hwthread_state for KVM (threads don't share MMU context), so
	 * we can remain in virtual mode for this.
	 */
	bctr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/*
	 * POWER8
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr
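/*
 * Summary of what pnv_powersave_common above sets up, as an illustrative
 * sketch only (not a literal C implementation; names are the ones used in
 * this file):
 *
 *	frame = push_interrupt_frame(r1);	// stdu r1,-INT_FRAME_SIZE(r1)
 *	frame->nip = frame->link = caller_lr;
 *	save NVGPRs, r2 and CR into frame;
 *	paca->nap_state_lost = 0;
 *	paca->saved_r1 = frame;			// PACAR1: wakeup code finds the frame here
 *	handler(r3);				// via ctr; real mode on P8, virtual mode on P9
 *
 * The wakeup side (pnv_wakeup_loss/pnv_wakeup_noloss at the end of this
 * file) unwinds this frame.
 */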
/*
 * This is the sequence required to execute idle instructions, as
 * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
 */
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
236:	cmpd	cr0,r0,r0;					\
	bne	236b;						\
	IDLE_INST;


	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                  */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then current thread is the last thread of the core entering
 * sleep. Last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The branch below is
 * patched out when the idle states are discovered if the platform does not
 * require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
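/*
 * The stop entry code below distinguishes two PSSCR configurations (this
 * is a summary of the checks that follow, not an exhaustive description
 * of the PSSCR architecture):
 *
 *   EC=ESL=0 ("lite" stop): no state is lost and the stop instruction
 *            completes like a normal instruction, so we return 0 via
 *            pnv_wakeup_noloss without going through the reset vector.
 *   EC/ESL set: the wakeup is taken as a system reset interrupt and
 *            state may be lost down to the Requested Level in
 *            PSSCR[60:63].
 *
 * PSSCR_EC_ESL_MASK_SHIFTED (defined near the top of this file) is the
 * EC|ESL mask shifted right by 16 so that andis., whose immediate is
 * applied shifted left by 16, tests exactly those two bits of r3.
 */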
/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60	/* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	PPC_STOP
	li	r3,0		/* Since we didn't lose state, return 0 */
	std	r3, PACA_REQ_PSSCR(r13)

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wakeup with ESL=0, SRR1 will not contain the wakeup
	 * reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here, so that we don't accidentally enter the
	 * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI.
	 */
	li	r12, 0
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
BEGIN_FTR_SECTION
	/*
	 * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up
	 * after a state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr	r4,SPRN_MMCR0
	std	r4,_MMCR0(r1)
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)

/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	PPC_STOP	/* Does not return (system reset interrupt) */

.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	PPC_STOP	/* Does not return (system reset interrupt) */

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common

#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;   /* extract wake reason field (P8) */ \
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;   /* P7 wake reason field is 3 bits */ \
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
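/*
 * CHECK_HMI_INTERRUPT above keys off the wake reason encoded in SRR1,
 * which callers pass in r12: on POWER8 this is the 4-bit field at
 * SRR1[42:45] (3 bits on POWER7), and the value 0xa indicates the wakeup
 * was caused by a Hypervisor Maintenance Interrupt. In that case the HMI
 * is handed to hmi_exception_realmode() before normal wakeup processing
 * continues at the local label 20.
 */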
/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 *
 * Offline (CPU unplug) case also must notify KVM that the CPU is
 * idle.
 */
_GLOBAL(power9_offline_stop)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * Tell KVM we're entering idle.
	 * This does not have to be done in real mode because the P9 MMU
	 * is independent per-thread. Some steppings share radix/hash mode
	 * between threads, but in that case KVM has a barrier sync in real
	 * mode before and after switching between radix and hash.
	 */
	li	r4,KVM_HWTHREAD_IN_IDLE
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/* fall through */

_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	sync
	lwz	r5, PACA_DONT_STOP(r13)
	cmpwi	r5, 0
	bne	1f
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
#endif
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1:
	/*
	 * We get here when TM / thread reconfiguration bug workaround
	 * code wants to get the CPU into SMT4 mode, and therefore
	 * we are being asked not to stop.
	 */
	li	r3, 0
	std	r3, PACA_REQ_PSSCR(r13)
	blr		/* return 0 for wakeup cause / SRR1 value */
#endif

/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup

/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	lbz	r0,HSTATE_HWTHREAD_STATE(r13)
	cmpwi	r0,KVM_HWTHREAD_IN_KERNEL
	beq	0f
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
0:	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss
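/*
 * A note on cr3, which the restore routines below rely on: the wakeup
 * code derives it from the SRR1 wake state field (SRR1[46:47]). In
 * pnv_powersave_wakeup_mce above that is the rlwinm/cmpwi pair on the
 * original SRR1 value; the system reset wakeup path is expected to set
 * cr3 the same way before reaching pnv_powersave_wakeup. Roughly, a
 * field value of 1 means no state was lost (cr3 lt), 2 means GPRs were
 * lost but hypervisor resources survived (cr3 eq), and 3 means
 * hypervisor state was also lost (cr3 gt).
 */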
/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to the link address.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9, if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see comment above). We also need to set
	 * then clear bit 60 in MMCRA to ensure the PMU starts running.
	 */
	blt	cr3,1f
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
	ld	r1,PACAR1(r13)
	ld	r4,_MMCR0(r1)
	mtspr	SPRN_MMCR0,r4
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
	mfspr	r4,SPRN_MMCRA
	ori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
	xori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
1:
	/*
	 * POWER ISA 3.0. Use PSSCR to determine if we
	 * are waking up from deep idle state
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl	r5,r5,4,60
	li	r0, 0		/* clear requested_psscr to say we're awake */
	std	r0, PACA_REQ_PSSCR(r13)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss	/* returns to caller */

	blr	/* Waking up without hypervisor state loss. */

/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or less.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
	 * indicates we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Waking up without hypervisor state loss */

/*
 * Called if waking up from idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is lost,
	 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
	 * here are the same as the test to restore NVGPRs:
	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * Lock bit is set in one of 2 cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/* Shift thread bit to winkle mask, then test if this thread is set,
	 * and remove it from the winkle bits */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1	/* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3 which indicates that we are waking up with at least partial
	 * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep (POWER8), per core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

/*
 * On POWER9, we can come here on wakeup from a cpuidle stop state.
 * Hence restore the additional SPRs to the saved value.
 *
 * On POWER8, we come here only on winkle. Since winkle is used
 * only in the case of CPU-Hotplug, we don't need to restore
 * the additional SPRs.
 */
BEGIN_FTR_SECTION
	bl	power9_restore_additional_sprs
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:

	mr	r12,r19
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync
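/*
 * The two wakeup exit paths below differ only in how much of the frame
 * built by pnv_powersave_common they rebuild. Illustrative pseudo-code
 * mirroring the code that follows (not a literal implementation):
 *
 *	pnv_wakeup_loss()
 *	{
 *		restore NVGPRs, r2 and CR from the frame;
 *		return to the saved _LINK with MSR = PACAKMSR;
 *	}
 *
 *	pnv_wakeup_noloss()
 *	{
 *		if (PACA_NAPSTATELOST is set)
 *			return pnv_wakeup_loss();
 *		restore CR only;
 *		return to the saved _NIP with MSR = PACAKMSR;
 *	}
 */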
/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr