/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x17ff : pSeries Interrupt prologs
 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 - 0x8fff : Initial (CPU0) segment table
 * 0x9000 -        : Early init and support code
 */
	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1 \
BEGIN_FTR_SECTION \
	cmpdi	r0,0x1ebe ; \
	beq-	1f ; \
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
	mr	r9,r13 ; \
	GET_PACA(r13) ; \
	mfspr	r11,SPRN_SRR0 ; \
0:

#define SYSCALL_PSERIES_2_RFID \
	mfspr	r12,SPRN_SRR1 ; \
	ld	r10,PACAKBASE(r13) ; \
	LOAD_HANDLER(r10, system_call_entry) ; \
	mtspr	SPRN_SRR0,r10 ; \
	ld	r10,PACAKMSR(r13) ; \
	mtspr	SPRN_SRR1,r10 ; \
	rfid ; \
	b	. ;	/* prevent speculative execution */

#define SYSCALL_PSERIES_3 \
	/* Fast LE/BE switch system call */ \
1:	mfspr	r12,SPRN_SRR1 ; \
	xori	r12,r12,MSR_LE ; \
	mtspr	SPRN_SRR1,r12 ; \
	rfid ;		/* return to userspace */ \
	b	. ;	/* prevent speculative execution */

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly so we do it via the CTR which
	 * is volatile across system calls.
	 */
#define SYSCALL_PSERIES_2_DIRECT \
	mflr	r10 ; \
	ld	r12,PACAKBASE(r13) ; \
	LOAD_HANDLER(r12, system_call_entry) ; \
	mtctr	r12 ; \
	mfspr	r12,SPRN_SRR1 ; \
	/* Re-use of r13... No spare regs to do this */ \
	li	r13,MSR_RI ; \
	mtmsrd	r13,1 ; \
	GET_PACA(r13) ;	/* get r13 back */ \
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT \
	mfspr	r12,SPRN_SRR1 ; \
	li	r10,MSR_RI ; \
	mtmsrd	r10,1 ;		/* Set RI (EE=0) */ \
	b	system_call_common ;
#endif

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap/sleep/winkle.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	beq	9f

	cmpwi	cr3,r13,2

	/*
	 * Check if last bit of HSPRG0 is set. This indicates whether we are
	 * waking up from winkle.
	 */
	GET_PACA(r13)
	clrldi	r5,r13,63
	clrrdi	r13,r13,1
	cmpwi	cr4,r5,1
	mtspr	SPRN_HSPRG0,r13

	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r0,PNV_THREAD_NAP
	bgt	cr2,8f			/* Either sleep or Winkle */

	/* Waking up from nap should not cause hypervisor state loss */
	bgt	cr3,.

	/* Waking up from nap */
	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	mfspr	r3,SPRN_SRR1
	beq	cr3,2f
	b	power7_wakeup_noloss
2:	b	power7_wakeup_loss

	/* Fast Sleep wakeup on PowerNV */
8:	GET_PACA(r13)
	b	power7_wakeup_tb_loss

9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	SET_SCRATCH0(r13)		/* save r13 */
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
	beq	9f

	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal. let's just stay stuck here */
	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
	bgt	cr1,.
9:
	OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
	b	machine_check_pSeries_early
FTR_SECTION_ELSE
	b	machine_check_pSeries_0
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_PR)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, alignment)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, program_check)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)

	. = 0x900
	.globl decrementer_pSeries
decrementer_pSeries:
	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)

	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	/*
	 * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
	 * that support it) before changing to HMT_MEDIUM. That allows the KVM
	 * code to save that value into the guest state (it is the guest's PPR
	 * value). Otherwise just change to HMT_MEDIUM as userspace has
	 * already saved the PPR.
	 */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
	HMT_MEDIUM;
	std	r10,PACA_EXGEN+EX_R10(r13)
	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#else
	HMT_MEDIUM;
#endif
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_RFID
	SYSCALL_PSERIES_3
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions; we branch
	 * out of line to handle them
	 */
	. = 0xe00
hv_data_storage_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_data_storage_hv

	. = 0xe20
hv_instr_storage_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_instr_storage_hv

	. = 0xe40
emulation_assist_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_hv

	. = 0xe60
hv_exception_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_early

	. = 0xe80
hv_doorbell_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
performance_monitor_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_pSeries

	. = 0xf20
altivec_unavailable_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_pSeries

	. = 0xf40
vsx_unavailable_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_pSeries

	. = 0xf60
facility_unavailable_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_pSeries

	. = 0xf80
hv_facility_unavailable_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_hv

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

	. = 0x1500
	.global denorm_exception_hv
denorm_exception_hv:
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	KVMTEST(0x1500)
	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#else
	. = 0x1800
#endif /* CONFIG_CBE_RAS */


/*** Out of line interrupts support ***/

	.align	7
	/* moved from 0x200 */
machine_check_pSeries_early:
BEGIN_FTR_SECTION
	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
	/*
	 * Register contents:
	 * R13		= PACA
	 * R9		= CR
	 * Original R9 to R13 is saved on PACA_EXMC
	 *
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 */
	mr	r11,r1			/* Save r1 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	bne	0f			/* Yes, we are. */
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)
	/* Limit nested MCE to level 4 to avoid stack overflow */
	cmpwi	r10,4
	bgt	2f			/* Check if we hit limit of 4 */
	std	r11,GPR1(r1)		/* Save r1 on the stack. */
	std	r11,0(r1)		/* make stack chain pointer */
	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
	std	r11,_NIP(r1)
	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
	std	r11,_MSR(r1)
	mfspr	r11,SPRN_DAR		/* Save DAR */
	std	r11,_DAR(r1)
	mfspr	r11,SPRN_DSISR		/* Save DSISR */
	std	r11,_DSISR(r1)
	std	r9,_CCR(r1)		/* Save CR in stackframe */
	/* Save r9 through r13 from EXMC save area to stack frame. */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
	mfmsr	r11			/* get MSR value */
	ori	r11,r11,MSR_ME		/* turn on ME bit */
	ori	r11,r11,MSR_RI		/* turn on RI bit */
	ld	r12,PACAKBASE(r13)	/* get high part of &label */
	LOAD_HANDLER(r12, machine_check_handle_early)
1:	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
	rfid
	b	.			/* prevent speculative execution */
2:
	/* Stack overflow. Stay on emergency stack and panic.
	 * Keep the ME bit off while panicking, so that if we hit
	 * another machine check we checkstop.
	 */
	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
	ld	r11,PACAKMSR(r13)
	ld	r12,PACAKBASE(r13)
	LOAD_HANDLER(r12, unrecover_mce)
	li	r10,MSR_ME
	andc	r11,r11,r10		/* Turn off MSR_ME */
	b	1b
	b	.			/* prevent speculative execution */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

#ifdef CONFIG_PPC_DENORMALISATION
denorm_assist:
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n)  XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n)  XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n)  XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	XVCPSGNDP32(32)
denorm_done:
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - else we hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H) \
masked_##_H##interrupt: \
	std	r11,PACA_EXGEN+EX_R11(r13); \
	lbz	r11,PACAIRQHAPPENED(r13); \
	or	r11,r11,r10; \
	stb	r11,PACAIRQHAPPENED(r13); \
	cmpwi	r10,PACA_IRQ_DEC; \
	bne	1f; \
	lis	r10,0x7fff; \
	ori	r10,r10,0xffff; \
	mtspr	SPRN_DEC,r10; \
	b	2f; \
1:	cmpwi	r10,PACA_IRQ_DBELL; \
	beq	2f; \
	cmpwi	r10,PACA_IRQ_HMI; \
	beq	2f; \
	mfspr	r10,SPRN_##_H##SRR1; \
	rldicl	r10,r10,48,1; /* clear MSR_EE */ \
	rotldi	r10,r10,16; \
	mtspr	SPRN_##_H##SRR1,r10; \
2:	mtcrf	0x80,r9; \
	ld	r9,PACA_EXGEN+EX_R9(r13); \
	ld	r10,PACA_EXGEN+EX_R10(r13); \
	ld	r11,PACA_EXGEN+EX_R11(r13); \
	GET_SCRATCH0(r13); \
	##_H##rfid; \
	b	.
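	/*
	 * The macro is instantiated twice below: the empty-argument form
	 * generates masked_interrupt for interrupts delivered via
	 * SRR0/SRR1 (returning with rfid), and the "H" form generates
	 * masked_Hinterrupt for those delivered via HSRR0/HSRR1
	 * (returning with hrfid).
	 */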

	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 * which kind of interrupt. MSR:EE is already off. We generate a
 * stackframe as if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
	beq	hardware_interrupt_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xe80
	beq	h_doorbell_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	blr

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvmppc_skip_interrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	rfid
	b	.

kvmppc_skip_Hinterrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	hrfid
	b	.
#endif

/*
 * Ensure that any handlers that get invoked from the exception prologs
 * above are below the first 64KB (0x10000) of the kernel image because
 * the prologs assemble the addresses of these handlers using the
 * LOAD_HANDLER macro, which uses an ori instruction.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
	STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
#endif
	STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
	STD_EXCEPTION_COMMON_ASYNC(0xe60, hmi_exception, handle_hmi_exception)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
#endif
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
	STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	/*
	 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * it. Addresses are the same as the original interrupt addresses, but
	 * offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest; anything that escalated
	 * to HV=1 from HV=0 is delivered via real mode handlers.
	 */

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */
	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
	. = 0x4380
	.globl data_access_slb_relon_pSeries
data_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
	. = 0x4480
	.globl instruction_access_slb_relon_pSeries
instruction_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	. = 0x4500
	.globl hardware_interrupt_relon_pSeries;
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_pSeries:
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
	FTR_SECTION_ELSE
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)

	. = 0x4c00
	.globl system_call_relon_pSeries
system_call_relon_pSeries:
	HMT_MEDIUM
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_DIRECT
	SYSCALL_PSERIES_3

	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)

	. = 0x4e00
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e20
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e40
emulation_assist_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_relon_hv

	. = 0x4e60
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e80
h_doorbell_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_relon_hv

	. = 0x4f00
performance_monitor_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_relon_pSeries

	. = 0x4f20
altivec_unavailable_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_relon_pSeries

	. = 0x4f40
vsx_unavailable_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_relon_pSeries

	. = 0x4f60
facility_unavailable_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_relon_pSeries

	. = 0x4f80
hv_facility_unavailable_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hv_facility_unavailable_relon_hv

	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
	. = 0x5500
	b	denorm_exception_hv
#endif
	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)

	.align	7
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	__ppc64_runlatch_on

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl	h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:

	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	ret_from_except

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	b	ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	b	ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	ret_from_except
#endif
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	ret_from_except

	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)

	/* Equivalents to the above handlers for relocation-on interrupt vectors */
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)

	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)

	/*
	 * The __end_interrupts marker must be past the out-of-line (OOL)
	 * handlers, so that they are copied to real address 0x100 when running
	 * a relocatable kernel. This ensures they can be reached from the short
	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
	 * directly, without using LOAD_HANDLER().
	 */
	.align	7
	.globl	__end_interrupts
__end_interrupts:

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

	.globl hmi_exception_early
hmi_exception_early:
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0xe60)
	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	std	r9,_CCR(r1)		/* save CR in stackframe */
	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
	std	r11,_NIP(r1)		/* save HSRR0 in stackframe */
	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
	std	r12,_MSR(r1)		/* save HSRR1 in stackframe */
	std	r10,0(r1)		/* make stack chain pointer */
	std	r0,GPR0(r1)		/* save r0 in stackframe */
	std	r10,GPR1(r1)		/* save r1 in stackframe */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
	EXCEPTION_PROLOG_COMMON_3(0xe60)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	hmi_exception_realmode
	/* Windup the stack. */
	/* Move original HSRR0 and HSRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_HSRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_HSRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	mtcr	r11
	REST_GPR(11, r1)
	REST_2GPRS(12, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)

	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	.globl hmi_exception_after_realmode
hmi_exception_after_realmode:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_hv


#define MACHINE_CHECK_HANDLER_WINDUP \
	/* Clear MSR_RI before setting SRR0 and SRR1. */ \
	li	r0,MSR_RI; \
	mfmsr	r9;		/* get MSR value */ \
	andc	r9,r9,r0; \
	mtmsrd	r9,1;		/* Clear MSR_RI */ \
	/* Move original SRR0 and SRR1 into the respective regs */ \
	ld	r9,_MSR(r1); \
	mtspr	SPRN_SRR1,r9; \
	ld	r3,_NIP(r1); \
	mtspr	SPRN_SRR0,r3; \
	ld	r9,_CTR(r1); \
	mtctr	r9; \
	ld	r9,_XER(r1); \
	mtxer	r9; \
	ld	r9,_LINK(r1); \
	mtlr	r9; \
	REST_GPR(0, r1); \
	REST_8GPRS(2, r1); \
	REST_GPR(10, r1); \
	ld	r11,_CCR(r1); \
	mtcr	r11; \
	/* Decrement paca->in_mce. */ \
	lhz	r12,PACA_IN_MCE(r13); \
	subi	r12,r12,1; \
	sth	r12,PACA_IN_MCE(r13); \
	REST_GPR(11, r1); \
	REST_2GPRS(12, r1); \
	/* restore original r1. */ \
	ld	r1,GPR1(r1)

	/*
	 * Handle machine check early in real mode. We come here with
	 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
	 */
	.align	7
	.globl machine_check_handle_early
machine_check_handle_early:
	std	r0,GPR0(r1)		/* Save r0 */
	EXCEPTION_PROLOG_COMMON_3(0x200)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)		/* Save result */
	ld	r12,_MSR(r1)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss or
	 *    supervisor state loss
	 *
	 * Go back to nap again if (b) is true.
	 */
	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */
	beq	4f			/* No, it wasn't */
	/* Thread was in power saving mode. Go back to nap again. */
	cmpwi	r11,2
	bne	3f
	/* Supervisor state loss */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)
3:	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	GET_PACA(r13)
	ld	r1,PACAR1(r13)
	li	r3,PNV_THREAD_NAP
	b	power7_enter_nap_mode
4:
#endif
	/*
	 * Check if we are coming from hypervisor userspace. If yes then we
	 * continue in host kernel in V mode to deliver the MC event.
	 */
	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
	beq	5f
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	9f			/* continue in V mode if we are. */

5:
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * We are coming from kernel context. Check if we are coming from
	 * guest. If yes, then we can continue. We will fall through
	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	9f			/* continue if we are. */
#endif
	/*
	 * At this point we are not sure about what context we come from.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	bne	2f
1:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecover_mce)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
	li	r3,MSR_ME
	andc	r10,r10,r3		/* Turn off MSR_ME */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.
2:
	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)		/* Load result */
	cmpdi	r3,0			/* see if we handled MCE successfully */

	beq	1b			/* if !handled then panic */
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	rfid
9:
	/* Deliver the machine check to host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
	b	machine_check_pSeries

unrecover_mce:
	/* Invoke machine_check_exception to print MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	/*
	 * We will not reach here. Even if we did, there is no way out. Call
	 * unrecoverable_exception and die.
	 */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
slb_miss_realmode:
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	RESTORE_PPR_PACA(PACA_EXSLB, r9)
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	RECONCILE_IRQ_STATE(r10, r11)
	bl	save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b


#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

/*
 * Hash table stuff
 */
	.align	7
do_hash_page:
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault
	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * r3 contains the faulting address
	 * r4 msr
	 * r5 contains the trap number
	 * r6 contains dsisr
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	mr	r4,r12
	ld	r6,_DSISR(r1)
	bl	__hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if __hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpdi	r3,0
	beq+	12f
	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_break
12:	b	ret_from_except_lite


/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	low_hash_fault
	b	ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled. We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	bad_page_fault
	b	ret_from_except

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b