/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM;
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	beq	9f

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
	bgt	cr1,.
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_64_HV
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	beq	cr1,2f
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	b	machine_check_pSeries

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

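/*
 * 0x380 below is the data segment (SLB) miss vector: r3 is stashed in
 * the PACA_EXSLB save area and then loaded with DAR, the faulting data
 * address, and SRR1 is copied into r12 before control reaches
 * .slb_miss_realmode (directly, or via LOAD_HANDLER when relocatable).
 */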
	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)

	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13
	GET_PACA(r13)
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, system_call_entry)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	. = 0xe00
hv_exception_trampoline:
	b	h_data_storage_hv
	. = 0xe20
	b	h_instr_storage_hv
	. = 0xe40
	b	emulation_assist_hv
	. = 0xe50
	b	hmi_exception_hv
	. = 0xe60
	b	hmi_exception_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** Out of line interrupts support ***/

	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
				 EXC_STD, KVMTEST, 0x200)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

	/* moved from 0x300 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
#endif
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV(., 0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV(., 0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV(., 0xe62, hmi_exception)	/* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened,
 * then, if it was a decrementer interrupt, we bump the dec to max
 * and return, else we hard disable and return. This is called with
 * r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	andi.	r10,r10,PACA_IRQ_DEC;			\
	beq	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.

	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

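/*
 * The two expansions above give masked_interrupt (standard SRR0/SRR1,
 * returning with rfid) and masked_Hinterrupt (HSRR0/HSRR1, returning
 * with hrfid), selected by the _H token pasted into the SPR names.
 */
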
/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500 or 0x900 to indicate which
 * kind of interrupt. MSR:EE is already off. We generate a
 * stackframe like if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	andi.	r3,r3,0x0800
	bne	decrementer_common
	b	hardware_interrupt_common

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	.align	7
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	.__ppc64_runlatch_on

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

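/*
 * The handlers below (data_access_common, instruction_access_common)
 * stash DAR/DSISR or SRR0 and then branch to .do_hash_page, which
 * tries to insert an HPTE before falling back to the full page fault
 * path.
 */
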
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl	h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
	b	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:

/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	12f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_dabr
12:	b	.ret_from_except_lite


/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

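/*
 * Note on the segment table entry format used by do_stab_bolted below:
 * dword 0 holds the ESID together with the valid (0x80) and Kp (0x10)
 * bits, dword 1 holds the VSID shifted left by 12.
 */
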
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick for only searching the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR	/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/* Space for CPU0's segment table */
	.balign	4096
	.globl initial_stab
initial_stab:
	.space	4096

#ifdef CONFIG_PPC_POWERNV
_GLOBAL(opal_mc_secondary_handler)
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	clrldi	r3,r3,2
	tovirt(r3,r3)
	std	r3,PACA_OPAL_MC_EVT(r13)
	ld	r13,OPAL_MC_SRR0(r3)
	mtspr	SPRN_SRR0,r13
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)
	GET_SCRATCH0(r13)
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */