/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	/*
	 * 0x100: System Reset.  Also the wakeup entry point from power7
	 * nap, so before taking the normal prolog we must distinguish
	 * "woke from nap" from a genuine reset.
	 */
	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM;
	SET_SCRATCH0(r13)		/* stash r13; prolog reloads it via GET_SCRATCH0 */
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31	/* isolate the 2-bit wakeup-state field; 0 = not a nap wakeup */
	beq	9f

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2		/* cr1 compares state-loss level against 2 */
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
	bgt	cr1,.			/* state > 2: spin forever (deliberate) */
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* A secondary that was napping may have been poked (0x80 in
	 * paca->procstart) to go run a KVM guest rather than resume. */
	lbz	r0,PACAPROCSTART(r13)
	cmpwi	r0,0x80
	bne	1f
	li	r0,0
	stb	r0,PACAPROCSTART(r13)	/* consume the start request */
	b	kvm_start_guest
1:
#endif

	beq	cr1,2f			/* state == 2: registers lost */
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	b	machine_check_pSeries

	/* 0x300: Data Storage Interrupt (data access fault). */
	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
	/* No SLB (segment-table CPU): first check for a stab miss */
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
#endif
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST_PR, 0x300)

	/* 0x380: Data SLB miss.  Handled in real mode for speed. */
	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR		/* r3 = faulting data address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11			/* preserve CTR; slb_miss_realmode restores it */
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	/* 0x480: Instruction SLB miss; same scheme as the data side above. */
	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = "." within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		/* HV-capable (arch 2.06+): external interrupts use HSRRs */
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)

	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

	/* 0xc00: System Call.  Hand-coded (no full prolog) for speed. */
	. = 0xc00
	.globl system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)			/* divert to KVM if the syscall came from a guest */
	GET_SCRATCH0(r13)
#endif
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe		/* magic syscall number: fast endian switch */
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13			/* save user r13; r13 becomes the PACA */
	GET_PACA(r13)
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, system_call_entry)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid				/* enter kernel at system_call_entry, MMU on */
	b	.			/* prevent speculative execution */

	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE		/* flip endianness bit only */
	mtspr	SPRN_SRR1,r12
	rfid				/* return to userspace */
	b	.

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	. = 0xe00
	b	h_data_storage_hv
	. = 0xe20
	b	h_instr_storage_hv
	. = 0xe40
	b	emulation_assist_hv
	. = 0xe50
	b	hmi_exception_hv
	. = 0xe60
	b	hmi_exception_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** Out of line interrupts support ***/

	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
				 EXC_STD, KVMTEST, 0x200)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

#ifndef CONFIG_POWER4_ONLY
	/* moved from 0x300 */
	/*
	 * Decide whether a DSI on a segment-table CPU is a stab miss
	 * for a bolted kernel segment.  Builds a discriminator in r10
	 * from DAR[0:3] and DSISR, then compares against 0x2c
	 * (kernel-region segment fault).
	 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60		/* r10 = top nibble of DAR (region) */
	rlwimi	r10,r9,16,0x20		/* merge in the DSISR segment-fault bit */
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300		/* never match while in a KVM guest */
#endif
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9			/* not a stab miss: restore CR and fall back */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)		/* recover the user r13 saved at the vector */
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#endif /* CONFIG_POWER4_ONLY */

	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV(., 0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV(., 0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
	stb	r10,PACAHARDIRQEN(r13)	/* r10 holds 0 here (set by the softening test) */
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	GET_SCRATCH0(r13)
	rfid				/* resume the interrupted code with EE off */
	b	.

/* Same as above, for an interrupt delivered via the hypervisor SRRs. */
masked_Hinterrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_HSRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_HSRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	GET_SCRATCH0(r13)
	hrfid
	b	.

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
	.globl system_reset_fwnmi
	.align	7
system_reset_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32		/* kernel base from the PACA address */
	mfmsr	r10
	mfspr	r11,SRR0		/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1		/* and SRR1 */
	mtspr	SRR1,r10
	rfid				/* switch to virtual mode handler */
	b	.			/* prevent spec. execution */
#endif /* __DISABLED__ */

	.align	7
	.globl __end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	.align	7
system_call_entry:
	b	system_call_common	/* trampoline within LOAD_HANDLER's 32k reach */

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 * (r3 points at the paca save area the prolog used.)
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE	/* carve a frame on the emergency stack */
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)		/* r3..r13 come from the paca exception frame */
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)	/* stash CFAR in the otherwise-unused slot */
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)		/* terminate the back-chain */
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)	/* mark as a regs frame */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b			/* kernel_bad_stack panics; loop just in case */

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR		/* capture fault address/status before the */
	std	r10,PACA_EXGEN+EX_DAR(r13)	/* common prolog can clobber them */
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)		/* fault address is the interrupted PC */
	andis.	r4,r12,0x5820		/* extract ISI status bits from SRR1 */
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 */
#ifdef __DISABLED__
	.align	7
	.globl slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault		/* slb_allocate_user failed */

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contain the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11			/* r11 carried the caller's CTR (see vectors) */
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ld	r11,PACALPPACAPTR(r13)	/* iSeries: SRRs live in the lppaca */
	ld	r11,LPPACASRR0(r11)	/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	2f

.machine push
.machine "power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9			/* slb_allocate uses cr0 and cr7 */
.machine pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.			/* prevent speculative execution */

2:	/* MSR_RI was clear: can't return; go die via unrecov_slb */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	b	unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
	FINISH_NAP
hardware_interrupt_entry:
	DISABLE_INTS
BEGIN_FTR_SECTION
	bl	.ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10		/* clear the nap flag in thread-info flags */
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE			/* FP in the kernel is a bug, not a fault */
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f			/* kernel-mode use falls through to the trap */
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	bne	.load_up_vsx		/* user mode: load VSX state and return */
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl __end_handlers
__end_handlers:

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ(r3);
	ld	r12,_MSR(r1)
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f

	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
1:	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	andi.	r3,r12,MSR_PR
	beq	2f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1			/* window: any exception here is fatal */

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.			/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	/* In: r3 = faulting address, r4 = DSISR/ISI status, r5 = trap nr */
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled
	 * so we need to reload them. It may be possible to be smarter here
	 * and move the irq tracing elsewhere but let's keep it simple for
	 * now
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r3,_DAR(r1)
	ld	r4,_DSISR(r1)
	ld	r5,_TRAP(r1)
	ld	r12,_MSR(r1)
	clrrdi	r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .arch_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.arch_local_irq_restore
	b	11f

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_dabr
	b	.ret_from_except_lite

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	13f			/* fault resolved */
	bl	.save_nvgprs
	mr	r5,r3			/* r5 = do_page_fault error code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

13:	b	.ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault	/* couldn't: treat as a page fault */
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28		/* r11 = ESID of the faulting address */
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f		/* found a free slot */
	addi	r10,r10,16	/* next ste (16 bytes each) */
	andi.	r11,r10,0x70	/* still within the 8-entry group? */
	bne	1b

	/* Stick for only searching the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10	/* never pick slot 0 (bolted) */

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11		/* flush any cached translation for it */

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio			/* order vsid store before the valid-bit store */

	mfspr	r11,SPRN_DAR		/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI before touching SRR0/1 */
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.			/* prevent speculative execution */

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
	.globl xLparMap
xLparMap:
	.quad	HvEsidsToMap		/* xNumberEsids */
	.quad	HvRangesToMap		/* xNumberRanges */
	.quad	STAB0_PAGE		/* xSegmentTableOffs */
	.zero	40			/* xRsvd */
	/* xEsids (HvEsidsToMap entries of 2 quads) */
	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
	.quad	VMALLOC_START_ESID	/* xKernelEsid */
	.quad	VMALLOC_START_VSID	/* xKernelVsid */
	/* xRanges (HvRangesToMap entries of 3 quads) */
	.quad	HvPagesToMap		/* xPages */
	.quad	0			/* xOffset */
	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap above), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x8000 */
	.globl initial_stab
initial_stab:
	.space	4096

#ifdef CONFIG_PPC_POWERNV
/*
 * Secondary-thread entry for an OPAL machine check: record the
 * event in the PACA, then rejoin the normal machine-check path
 * with SRR0/SRR1/r3 restored from the OPAL-provided event.
 */
_GLOBAL(opal_mc_secondary_handler)
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	clrldi	r3,r3,2			/* strip the top address bits */
	tovirt(r3,r3)
	std	r3,PACA_OPAL_MC_EVT(r13)
	ld	r13,OPAL_MC_SRR0(r3)
	mtspr	SPRN_SRR0,r13
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)
	GET_SCRATCH0(r13)
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */