/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM;
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	beq	9f

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
	bgt	cr1,.
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_64_HV
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	beq	cr1,2f
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	b	machine_check_pSeries

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

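	/*
	 * 0x380: data segment (SLB) miss.  Save r3, pick up the faulting
	 * address from DAR and hand off to the real-mode SLB miss handler.
	 */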
	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)

	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13
	GET_PACA(r13)
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, system_call_entry)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */
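
	/*
	 * The KVMTEST(0xc00) in the prolog above diverts system calls
	 * issued while a KVM guest was running to the handler emitted here.
	 */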
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	. = 0xe00
	b	h_data_storage_hv
	. = 0xe20
	b	h_instr_storage_hv
	. = 0xe40
	b	emulation_assist_hv
	. = 0xe50
	b	hmi_exception_hv
	. = 0xe60
	b	hmi_exception_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */

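	/*
	 * Pad up to 0x3000; from here on this is out-of-line support code
	 * rather than fixed-offset vectors (see the memory layout comment
	 * at the top of this file).
	 */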
	. = 0x3000

/*** Out of line interrupts support ***/

	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
				 EXC_STD, KVMTEST, 0x200)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

	/* moved from 0x300 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
#endif
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV(., 0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV(., 0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened,
 * then, if it was a decrementer interrupt, we bump the dec to max and
 * return, else we hard disable and return. This is called with
 * r10 containing the value to OR into the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	andi.	r10,r10,PACA_IRQ_DEC;			\
	beq	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.

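	/*
	 * Instantiate both flavours: the standard form (SRR1, rfid) and
	 * the hypervisor form (HSRR1, hrfid).
	 */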
	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500 or 0x900 to indicate which
 * kind of interrupt. MSR:EE is already off. We generate a
 * stackframe as if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable interrupts.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	andi.	r3,r3,0x0800
	bne	decrementer_common
	b	hardware_interrupt_common

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align	7
system_reset_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used when the SLB miss handler has to go virtual,
 * which doesn't happen at the moment but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

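	/*
	 * Each STD_EXCEPTION_COMMON() entry below expands to a handler of
	 * the same shape as machine_check_common: set up the exception
	 * frame, then call the C routine named in the last argument.  The
	 * _ASYNC variants are used for the asynchronous interrupts
	 * (external, decrementer, performance monitor).
	 */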
	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	.align	7
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	.__ppc64_runlatch_on

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

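/*
 * Common handlers for the storage interrupts follow; data_access_common
 * and instruction_access_common both try .do_hash_page first.
 */
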
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl	h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss handler that is used when going to virtual
 * mode for SLB misses; it is currently unused.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

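	/*
	 * If MSR_RI is clear in the saved SRR1 the interrupt is not
	 * recoverable (SRR0/SRR1 may already have been overwritten),
	 * so go to unrecov_slb below instead of returning.
	 */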
	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
	b	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:

/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	12f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_dabr
12:	b	.ret_from_except_lite


/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to searching only the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR	/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/* Space for CPU0's segment table */
	.balign	4096
	.globl initial_stab
initial_stab:
	.space	4096

#ifdef CONFIG_PPC_POWERNV
_GLOBAL(opal_mc_secondary_handler)
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	clrldi	r3,r3,2
	tovirt(r3,r3)
	std	r3,PACA_OPAL_MC_EVT(r13)
	ld	r13,OPAL_MC_SRR0(r3)
	mtspr	SPRN_SRR0,r13
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)
	GET_SCRATCH0(r13)
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */