1/* SPDX-License-Identifier: GPL-2.0-or-later */ 2/* 3 * Boot code and exception vectors for Book3E processors 4 * 5 * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. 6 */ 7 8#include <linux/threads.h> 9#include <asm/reg.h> 10#include <asm/page.h> 11#include <asm/ppc_asm.h> 12#include <asm/asm-offsets.h> 13#include <asm/cputable.h> 14#include <asm/setup.h> 15#include <asm/thread_info.h> 16#include <asm/reg_a2.h> 17#include <asm/exception-64e.h> 18#include <asm/bug.h> 19#include <asm/irqflags.h> 20#include <asm/ptrace.h> 21#include <asm/ppc-opcode.h> 22#include <asm/mmu.h> 23#include <asm/hw_irq.h> 24#include <asm/kvm_asm.h> 25#include <asm/kvm_booke_hv_asm.h> 26#include <asm/feature-fixups.h> 27#include <asm/context_tracking.h> 28 29/* 64e interrupt returns always use SRR registers */ 30#define fast_interrupt_return fast_interrupt_return_srr 31#define interrupt_return interrupt_return_srr 32 33/* XXX This will ultimately add space for a special exception save 34 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... 35 * when taking special interrupts. For now we don't support that, 36 * special interrupts from within a non-standard level will probably 37 * blow you up 38 */ 39#define SPECIAL_EXC_SRR0 0 40#define SPECIAL_EXC_SRR1 1 41#define SPECIAL_EXC_SPRG_GEN 2 42#define SPECIAL_EXC_SPRG_TLB 3 43#define SPECIAL_EXC_MAS0 4 44#define SPECIAL_EXC_MAS1 5 45#define SPECIAL_EXC_MAS2 6 46#define SPECIAL_EXC_MAS3 7 47#define SPECIAL_EXC_MAS6 8 48#define SPECIAL_EXC_MAS7 9 49#define SPECIAL_EXC_MAS5 10 /* E.HV only */ 50#define SPECIAL_EXC_MAS8 11 /* E.HV only */ 51#define SPECIAL_EXC_IRQHAPPENED 12 52#define SPECIAL_EXC_DEAR 13 53#define SPECIAL_EXC_ESR 14 54#define SPECIAL_EXC_SOFTE 15 55#define SPECIAL_EXC_CSRR0 16 56#define SPECIAL_EXC_CSRR1 17 57/* must be even to keep 16-byte stack alignment */ 58#define SPECIAL_EXC_END 18 59 60#define SPECIAL_EXC_FRAME_SIZE (INT_FRAME_SIZE + SPECIAL_EXC_END * 8) 61#define SPECIAL_EXC_FRAME_OFFS (INT_FRAME_SIZE - 288) 62 63#define SPECIAL_EXC_STORE(reg, name) \ 64 std reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1) 65 66#define SPECIAL_EXC_LOAD(reg, name) \ 67 ld reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1) 68 69special_reg_save: 70 /* 71 * We only need (or have stack space) to save this stuff if 72 * we interrupted the kernel. 73 */ 74 ld r3,_MSR(r1) 75 andi. r3,r3,MSR_PR 76 bnelr 77 78 /* 79 * Advance to the next TLB exception frame for handler 80 * types that don't do it automatically. 81 */ 82 LOAD_REG_ADDR(r11,extlb_level_exc) 83 lwz r12,0(r11) 84 mfspr r10,SPRN_SPRG_TLB_EXFRAME 85 add r10,r10,r12 86 mtspr SPRN_SPRG_TLB_EXFRAME,r10 87 88 /* 89 * Save registers needed to allow nesting of certain exceptions 90 * (such as TLB misses) inside special exception levels 91 */ 92 mfspr r10,SPRN_SRR0 93 SPECIAL_EXC_STORE(r10,SRR0) 94 mfspr r10,SPRN_SRR1 95 SPECIAL_EXC_STORE(r10,SRR1) 96 mfspr r10,SPRN_SPRG_GEN_SCRATCH 97 SPECIAL_EXC_STORE(r10,SPRG_GEN) 98 mfspr r10,SPRN_SPRG_TLB_SCRATCH 99 SPECIAL_EXC_STORE(r10,SPRG_TLB) 100 mfspr r10,SPRN_MAS0 101 SPECIAL_EXC_STORE(r10,MAS0) 102 mfspr r10,SPRN_MAS1 103 SPECIAL_EXC_STORE(r10,MAS1) 104 mfspr r10,SPRN_MAS2 105 SPECIAL_EXC_STORE(r10,MAS2) 106 mfspr r10,SPRN_MAS3 107 SPECIAL_EXC_STORE(r10,MAS3) 108 mfspr r10,SPRN_MAS6 109 SPECIAL_EXC_STORE(r10,MAS6) 110 mfspr r10,SPRN_MAS7 111 SPECIAL_EXC_STORE(r10,MAS7) 112BEGIN_FTR_SECTION 113 mfspr r10,SPRN_MAS5 114 SPECIAL_EXC_STORE(r10,MAS5) 115 mfspr r10,SPRN_MAS8 116 SPECIAL_EXC_STORE(r10,MAS8) 117 118 /* MAS5/8 could have inappropriate values if we interrupted KVM code */ 119 li r10,0 120 mtspr SPRN_MAS5,r10 121 mtspr SPRN_MAS8,r10 122END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) 123 mfspr r10,SPRN_DEAR 124 SPECIAL_EXC_STORE(r10,DEAR) 125 mfspr r10,SPRN_ESR 126 SPECIAL_EXC_STORE(r10,ESR) 127 128 ld r10,_NIP(r1) 129 SPECIAL_EXC_STORE(r10,CSRR0) 130 ld r10,_MSR(r1) 131 SPECIAL_EXC_STORE(r10,CSRR1) 132 133 blr 134 135ret_from_level_except: 136 ld r3,_MSR(r1) 137 andi. r3,r3,MSR_PR 138 beq 1f 139 REST_NVGPRS(r1) 140 b interrupt_return 1411: 142 143 LOAD_REG_ADDR(r11,extlb_level_exc) 144 lwz r12,0(r11) 145 mfspr r10,SPRN_SPRG_TLB_EXFRAME 146 sub r10,r10,r12 147 mtspr SPRN_SPRG_TLB_EXFRAME,r10 148 149 /* 150 * It's possible that the special level exception interrupted a 151 * TLB miss handler, and inserted the same entry that the 152 * interrupted handler was about to insert. On CPUs without TLB 153 * write conditional, this can result in a duplicate TLB entry. 154 * Wipe all non-bolted entries to be safe. 155 * 156 * Note that this doesn't protect against any TLB misses 157 * we may take accessing the stack from here to the end of 158 * the special level exception. It's not clear how we can 159 * reasonably protect against that, but only CPUs with 160 * neither TLB write conditional nor bolted kernel memory 161 * are affected. Do any such CPUs even exist? 162 */ 163 PPC_TLBILX_ALL(0,R0) 164 165 REST_NVGPRS(r1) 166 167 SPECIAL_EXC_LOAD(r10,SRR0) 168 mtspr SPRN_SRR0,r10 169 SPECIAL_EXC_LOAD(r10,SRR1) 170 mtspr SPRN_SRR1,r10 171 SPECIAL_EXC_LOAD(r10,SPRG_GEN) 172 mtspr SPRN_SPRG_GEN_SCRATCH,r10 173 SPECIAL_EXC_LOAD(r10,SPRG_TLB) 174 mtspr SPRN_SPRG_TLB_SCRATCH,r10 175 SPECIAL_EXC_LOAD(r10,MAS0) 176 mtspr SPRN_MAS0,r10 177 SPECIAL_EXC_LOAD(r10,MAS1) 178 mtspr SPRN_MAS1,r10 179 SPECIAL_EXC_LOAD(r10,MAS2) 180 mtspr SPRN_MAS2,r10 181 SPECIAL_EXC_LOAD(r10,MAS3) 182 mtspr SPRN_MAS3,r10 183 SPECIAL_EXC_LOAD(r10,MAS6) 184 mtspr SPRN_MAS6,r10 185 SPECIAL_EXC_LOAD(r10,MAS7) 186 mtspr SPRN_MAS7,r10 187BEGIN_FTR_SECTION 188 SPECIAL_EXC_LOAD(r10,MAS5) 189 mtspr SPRN_MAS5,r10 190 SPECIAL_EXC_LOAD(r10,MAS8) 191 mtspr SPRN_MAS8,r10 192END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) 193 194 SPECIAL_EXC_LOAD(r10,DEAR) 195 mtspr SPRN_DEAR,r10 196 SPECIAL_EXC_LOAD(r10,ESR) 197 mtspr SPRN_ESR,r10 198 199 stdcx. r0,0,r1 /* to clear the reservation */ 200 201 REST_GPRS(2, 9, r1) 202 203 ld r10,_CTR(r1) 204 ld r11,_XER(r1) 205 mtctr r10 206 mtxer r11 207 208 blr 209 210.macro ret_from_level srr0 srr1 paca_ex scratch 211 bl ret_from_level_except 212 213 ld r10,_LINK(r1) 214 ld r11,_CCR(r1) 215 ld r0,GPR13(r1) 216 mtlr r10 217 mtcr r11 218 219 ld r10,GPR10(r1) 220 ld r11,GPR11(r1) 221 ld r12,GPR12(r1) 222 mtspr \scratch,r0 223 224 std r10,\paca_ex+EX_R10(r13); 225 std r11,\paca_ex+EX_R11(r13); 226 ld r10,_NIP(r1) 227 ld r11,_MSR(r1) 228 ld r0,GPR0(r1) 229 ld r1,GPR1(r1) 230 mtspr \srr0,r10 231 mtspr \srr1,r11 232 ld r10,\paca_ex+EX_R10(r13) 233 ld r11,\paca_ex+EX_R11(r13) 234 mfspr r13,\scratch 235.endm 236 237ret_from_crit_except: 238 ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH 239 rfci 240 241ret_from_mc_except: 242 ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH 243 rfmci 244 245/* Exception prolog code for all exceptions */ 246#define EXCEPTION_PROLOG(n, intnum, type, addition) \ 247 mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ 248 mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ 249 std r10,PACA_EX##type+EX_R10(r13); \ 250 std r11,PACA_EX##type+EX_R11(r13); \ 251 mfcr r10; /* save CR */ \ 252 mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ 253 DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \ 254 stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \ 255 addition; /* additional code for that exc. */ \ 256 std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ 257 type##_SET_KSTACK; /* get special stack if necessary */\ 258 andi. r10,r11,MSR_PR; /* save stack pointer */ \ 259 beq 1f; /* branch around if supervisor */ \ 260 ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\ 2611: type##_BTB_FLUSH \ 262 cmpdi cr1,r1,0; /* check if SP makes sense */ \ 263 bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \ 264 mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */ 265 266/* Exception type-specific macros */ 267#define GEN_SET_KSTACK \ 268 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ 269#define SPRN_GEN_SRR0 SPRN_SRR0 270#define SPRN_GEN_SRR1 SPRN_SRR1 271 272#define GDBELL_SET_KSTACK GEN_SET_KSTACK 273#define SPRN_GDBELL_SRR0 SPRN_GSRR0 274#define SPRN_GDBELL_SRR1 SPRN_GSRR1 275 276#define CRIT_SET_KSTACK \ 277 ld r1,PACA_CRIT_STACK(r13); \ 278 subi r1,r1,SPECIAL_EXC_FRAME_SIZE 279#define SPRN_CRIT_SRR0 SPRN_CSRR0 280#define SPRN_CRIT_SRR1 SPRN_CSRR1 281 282#define DBG_SET_KSTACK \ 283 ld r1,PACA_DBG_STACK(r13); \ 284 subi r1,r1,SPECIAL_EXC_FRAME_SIZE 285#define SPRN_DBG_SRR0 SPRN_DSRR0 286#define SPRN_DBG_SRR1 SPRN_DSRR1 287 288#define MC_SET_KSTACK \ 289 ld r1,PACA_MC_STACK(r13); \ 290 subi r1,r1,SPECIAL_EXC_FRAME_SIZE 291#define SPRN_MC_SRR0 SPRN_MCSRR0 292#define SPRN_MC_SRR1 SPRN_MCSRR1 293 294#define GEN_BTB_FLUSH \ 295 START_BTB_FLUSH_SECTION \ 296 beq 1f; \ 297 BTB_FLUSH(r10) \ 298 1: \ 299 END_BTB_FLUSH_SECTION 300 301#define CRIT_BTB_FLUSH \ 302 START_BTB_FLUSH_SECTION \ 303 BTB_FLUSH(r10) \ 304 END_BTB_FLUSH_SECTION 305 306#define DBG_BTB_FLUSH CRIT_BTB_FLUSH 307#define MC_BTB_FLUSH CRIT_BTB_FLUSH 308#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH 309 310#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ 311 EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) 312 313#define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \ 314 EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n)) 315 316#define DBG_EXCEPTION_PROLOG(n, intnum, addition) \ 317 EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n)) 318 319#define MC_EXCEPTION_PROLOG(n, intnum, addition) \ 320 EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n)) 321 322#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \ 323 EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n)) 324 325/* Variants of the "addition" argument for the prolog 326 */ 327#define PROLOG_ADDITION_NONE_GEN(n) 328#define PROLOG_ADDITION_NONE_GDBELL(n) 329#define PROLOG_ADDITION_NONE_CRIT(n) 330#define PROLOG_ADDITION_NONE_DBG(n) 331#define PROLOG_ADDITION_NONE_MC(n) 332 333#define PROLOG_ADDITION_MASKABLE_GEN(n) \ 334 lbz r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */ \ 335 andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \ 336 bne masked_interrupt_book3e_##n 337 338/* 339 * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is 340 * called, because that does SAVE_NVGPRS which must see the original register 341 * values, otherwise the scratch values might be restored when exiting the 342 * interrupt. 343 */ 344#define PROLOG_ADDITION_2REGS_GEN(n) \ 345 std r14,PACA_EXGEN+EX_R14(r13); \ 346 std r15,PACA_EXGEN+EX_R15(r13) 347 348#define PROLOG_ADDITION_1REG_GEN(n) \ 349 std r14,PACA_EXGEN+EX_R14(r13); 350 351#define PROLOG_ADDITION_2REGS_CRIT(n) \ 352 std r14,PACA_EXCRIT+EX_R14(r13); \ 353 std r15,PACA_EXCRIT+EX_R15(r13) 354 355#define PROLOG_ADDITION_2REGS_DBG(n) \ 356 std r14,PACA_EXDBG+EX_R14(r13); \ 357 std r15,PACA_EXDBG+EX_R15(r13) 358 359#define PROLOG_ADDITION_2REGS_MC(n) \ 360 std r14,PACA_EXMC+EX_R14(r13); \ 361 std r15,PACA_EXMC+EX_R15(r13) 362 363 364/* Core exception code for all exceptions except TLB misses. */ 365#define EXCEPTION_COMMON_LVL(n, scratch, excf) \ 366exc_##n##_common: \ 367 std r0,GPR0(r1); /* save r0 in stackframe */ \ 368 std r2,GPR2(r1); /* save r2 in stackframe */ \ 369 SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \ 370 std r10,_NIP(r1); /* save SRR0 to stackframe */ \ 371 std r11,_MSR(r1); /* save SRR1 to stackframe */ \ 372 beq 2f; /* if from kernel mode */ \ 3732: ld r3,excf+EX_R10(r13); /* get back r10 */ \ 374 ld r4,excf+EX_R11(r13); /* get back r11 */ \ 375 mfspr r5,scratch; /* get back r13 */ \ 376 std r12,GPR12(r1); /* save r12 in stackframe */ \ 377 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ 378 mflr r6; /* save LR in stackframe */ \ 379 mfctr r7; /* save CTR in stackframe */ \ 380 mfspr r8,SPRN_XER; /* save XER in stackframe */ \ 381 ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \ 382 lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ 383 lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \ 384 ld r12,exception_marker@toc(r2); \ 385 li r0,0; \ 386 std r3,GPR10(r1); /* save r10 to stackframe */ \ 387 std r4,GPR11(r1); /* save r11 to stackframe */ \ 388 std r5,GPR13(r1); /* save it to stackframe */ \ 389 std r6,_LINK(r1); \ 390 std r7,_CTR(r1); \ 391 std r8,_XER(r1); \ 392 li r3,(n); /* regs.trap vector */ \ 393 std r9,0(r1); /* store stack frame back link */ \ 394 std r10,_CCR(r1); /* store orig CR in stackframe */ \ 395 std r9,GPR1(r1); /* store stack frame back link */ \ 396 std r11,SOFTE(r1); /* and save it to stackframe */ \ 397 std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ 398 std r3,_TRAP(r1); /* set trap number */ \ 399 std r0,RESULT(r1); /* clear regs->result */ \ 400 SAVE_NVGPRS(r1); 401 402#define EXCEPTION_COMMON(n) \ 403 EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN) 404#define EXCEPTION_COMMON_CRIT(n) \ 405 EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT) 406#define EXCEPTION_COMMON_MC(n) \ 407 EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC) 408#define EXCEPTION_COMMON_DBG(n) \ 409 EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG) 410 411/* XXX FIXME: Restore r14/r15 when necessary */ 412#define BAD_STACK_TRAMPOLINE(n) \ 413exc_##n##_bad_stack: \ 414 li r1,(n); /* get exception number */ \ 415 sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \ 416 b bad_stack_book3e; /* bad stack error */ 417 418/* WARNING: If you change the layout of this stub, make sure you check 419 * the debug exception handler which handles single stepping 420 * into exceptions from userspace, and the MM code in 421 * arch/powerpc/mm/tlb_nohash.c which patches the branch here 422 * and would need to be updated if that branch is moved 423 */ 424#define EXCEPTION_STUB(loc, label) \ 425 . = interrupt_base_book3e + loc; \ 426 nop; /* To make debug interrupts happy */ \ 427 b exc_##label##_book3e; 428 429#define ACK_NONE(r) 430#define ACK_DEC(r) \ 431 lis r,TSR_DIS@h; \ 432 mtspr SPRN_TSR,r 433#define ACK_FIT(r) \ 434 lis r,TSR_FIS@h; \ 435 mtspr SPRN_TSR,r 436 437/* Used by asynchronous interrupt that may happen in the idle loop. 438 * 439 * This check if the thread was in the idle loop, and if yes, returns 440 * to the caller rather than the PC. This is to avoid a race if 441 * interrupts happen before the wait instruction. 442 */ 443#define CHECK_NAPPING() \ 444 ld r11, PACA_THREAD_INFO(r13); \ 445 ld r10,TI_LOCAL_FLAGS(r11); \ 446 andi. r9,r10,_TLF_NAPPING; \ 447 beq+ 1f; \ 448 ld r8,_LINK(r1); \ 449 rlwinm r7,r10,0,~_TLF_NAPPING; \ 450 std r8,_NIP(r1); \ 451 std r7,TI_LOCAL_FLAGS(r11); \ 4521: 453 454 455#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \ 456 START_EXCEPTION(label); \ 457 NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\ 458 EXCEPTION_COMMON(trapnum) \ 459 ack(r8); \ 460 CHECK_NAPPING(); \ 461 addi r3,r1,STACK_FRAME_OVERHEAD; \ 462 bl hdlr; \ 463 b interrupt_return 464 465/* This value is used to mark exception frames on the stack. */ 466 .section ".toc","aw" 467exception_marker: 468 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER 469 470 471/* 472 * And here we have the exception vectors ! 473 */ 474 475 .text 476 .balign 0x1000 477 .globl interrupt_base_book3e 478interrupt_base_book3e: /* fake trap */ 479 EXCEPTION_STUB(0x000, machine_check) 480 EXCEPTION_STUB(0x020, critical_input) /* 0x0100 */ 481 EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ 482 EXCEPTION_STUB(0x060, data_storage) /* 0x0300 */ 483 EXCEPTION_STUB(0x080, instruction_storage) /* 0x0400 */ 484 EXCEPTION_STUB(0x0a0, external_input) /* 0x0500 */ 485 EXCEPTION_STUB(0x0c0, alignment) /* 0x0600 */ 486 EXCEPTION_STUB(0x0e0, program) /* 0x0700 */ 487 EXCEPTION_STUB(0x100, fp_unavailable) /* 0x0800 */ 488 EXCEPTION_STUB(0x120, system_call) /* 0x0c00 */ 489 EXCEPTION_STUB(0x140, ap_unavailable) /* 0x0f20 */ 490 EXCEPTION_STUB(0x160, decrementer) /* 0x0900 */ 491 EXCEPTION_STUB(0x180, fixed_interval) /* 0x0980 */ 492 EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ 493 EXCEPTION_STUB(0x1c0, data_tlb_miss) 494 EXCEPTION_STUB(0x1e0, instruction_tlb_miss) 495 EXCEPTION_STUB(0x200, altivec_unavailable) 496 EXCEPTION_STUB(0x220, altivec_assist) 497 EXCEPTION_STUB(0x260, perfmon) 498 EXCEPTION_STUB(0x280, doorbell) 499 EXCEPTION_STUB(0x2a0, doorbell_crit) 500 EXCEPTION_STUB(0x2c0, guest_doorbell) 501 EXCEPTION_STUB(0x2e0, guest_doorbell_crit) 502 EXCEPTION_STUB(0x300, hypercall) 503 EXCEPTION_STUB(0x320, ehpriv) 504 EXCEPTION_STUB(0x340, lrat_error) 505 506 .globl __end_interrupts 507__end_interrupts: 508 509/* Critical Input Interrupt */ 510 START_EXCEPTION(critical_input); 511 CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, 512 PROLOG_ADDITION_NONE) 513 EXCEPTION_COMMON_CRIT(0x100) 514 bl special_reg_save 515 CHECK_NAPPING(); 516 addi r3,r1,STACK_FRAME_OVERHEAD 517 bl unknown_nmi_exception 518 b ret_from_crit_except 519 520/* Machine Check Interrupt */ 521 START_EXCEPTION(machine_check); 522 MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK, 523 PROLOG_ADDITION_NONE) 524 EXCEPTION_COMMON_MC(0x000) 525 bl special_reg_save 526 CHECK_NAPPING(); 527 addi r3,r1,STACK_FRAME_OVERHEAD 528 bl machine_check_exception 529 b ret_from_mc_except 530 531/* Data Storage Interrupt */ 532 START_EXCEPTION(data_storage) 533 NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE, 534 PROLOG_ADDITION_2REGS) 535 mfspr r14,SPRN_DEAR 536 mfspr r15,SPRN_ESR 537 std r14,_DEAR(r1) 538 std r15,_ESR(r1) 539 ld r14,PACA_EXGEN+EX_R14(r13) 540 ld r15,PACA_EXGEN+EX_R15(r13) 541 EXCEPTION_COMMON(0x300) 542 b storage_fault_common 543 544/* Instruction Storage Interrupt */ 545 START_EXCEPTION(instruction_storage); 546 NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE, 547 PROLOG_ADDITION_2REGS) 548 li r15,0 549 mr r14,r10 550 std r14,_DEAR(r1) 551 std r15,_ESR(r1) 552 ld r14,PACA_EXGEN+EX_R14(r13) 553 ld r15,PACA_EXGEN+EX_R15(r13) 554 EXCEPTION_COMMON(0x400) 555 b storage_fault_common 556 557/* External Input Interrupt */ 558 MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, 559 external_input, do_IRQ, ACK_NONE) 560 561/* Alignment */ 562 START_EXCEPTION(alignment); 563 NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT, 564 PROLOG_ADDITION_2REGS) 565 mfspr r14,SPRN_DEAR 566 mfspr r15,SPRN_ESR 567 std r14,_DEAR(r1) 568 std r15,_ESR(r1) 569 ld r14,PACA_EXGEN+EX_R14(r13) 570 ld r15,PACA_EXGEN+EX_R15(r13) 571 EXCEPTION_COMMON(0x600) 572 b alignment_more /* no room, go out of line */ 573 574/* Program Interrupt */ 575 START_EXCEPTION(program); 576 NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM, 577 PROLOG_ADDITION_1REG) 578 mfspr r14,SPRN_ESR 579 std r14,_ESR(r1) 580 ld r14,PACA_EXGEN+EX_R14(r13) 581 EXCEPTION_COMMON(0x700) 582 addi r3,r1,STACK_FRAME_OVERHEAD 583 bl program_check_exception 584 REST_NVGPRS(r1) 585 b interrupt_return 586 587/* Floating Point Unavailable Interrupt */ 588 START_EXCEPTION(fp_unavailable); 589 NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL, 590 PROLOG_ADDITION_NONE) 591 /* we can probably do a shorter exception entry for that one... */ 592 EXCEPTION_COMMON(0x800) 593 ld r12,_MSR(r1) 594 andi. r0,r12,MSR_PR; 595 beq- 1f 596 bl load_up_fpu 597 b fast_interrupt_return 5981: addi r3,r1,STACK_FRAME_OVERHEAD 599 bl kernel_fp_unavailable_exception 600 b interrupt_return 601 602/* Altivec Unavailable Interrupt */ 603 START_EXCEPTION(altivec_unavailable); 604 NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, 605 PROLOG_ADDITION_NONE) 606 /* we can probably do a shorter exception entry for that one... */ 607 EXCEPTION_COMMON(0x200) 608#ifdef CONFIG_ALTIVEC 609BEGIN_FTR_SECTION 610 ld r12,_MSR(r1) 611 andi. r0,r12,MSR_PR; 612 beq- 1f 613 bl load_up_altivec 614 b fast_interrupt_return 6151: 616END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 617#endif 618 addi r3,r1,STACK_FRAME_OVERHEAD 619 bl altivec_unavailable_exception 620 b interrupt_return 621 622/* AltiVec Assist */ 623 START_EXCEPTION(altivec_assist); 624 NORMAL_EXCEPTION_PROLOG(0x220, 625 BOOKE_INTERRUPT_ALTIVEC_ASSIST, 626 PROLOG_ADDITION_NONE) 627 EXCEPTION_COMMON(0x220) 628 addi r3,r1,STACK_FRAME_OVERHEAD 629#ifdef CONFIG_ALTIVEC 630BEGIN_FTR_SECTION 631 bl altivec_assist_exception 632END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 633 REST_NVGPRS(r1) 634#else 635 bl unknown_exception 636#endif 637 b interrupt_return 638 639 640/* Decrementer Interrupt */ 641 MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, 642 decrementer, timer_interrupt, ACK_DEC) 643 644/* Fixed Interval Timer Interrupt */ 645 MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, 646 fixed_interval, unknown_exception, ACK_FIT) 647 648/* Watchdog Timer Interrupt */ 649 START_EXCEPTION(watchdog); 650 CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, 651 PROLOG_ADDITION_NONE) 652 EXCEPTION_COMMON_CRIT(0x9f0) 653 bl special_reg_save 654 CHECK_NAPPING(); 655 addi r3,r1,STACK_FRAME_OVERHEAD 656#ifdef CONFIG_BOOKE_WDT 657 bl WatchdogException 658#else 659 bl unknown_nmi_exception 660#endif 661 b ret_from_crit_except 662 663/* System Call Interrupt */ 664 START_EXCEPTION(system_call) 665 mr r9,r13 /* keep a copy of userland r13 */ 666 mfspr r11,SPRN_SRR0 /* get return address */ 667 mfspr r12,SPRN_SRR1 /* get previous MSR */ 668 mfspr r13,SPRN_SPRG_PACA /* get our PACA */ 669 b system_call_common 670 671/* Auxiliary Processor Unavailable Interrupt */ 672 START_EXCEPTION(ap_unavailable); 673 NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL, 674 PROLOG_ADDITION_NONE) 675 EXCEPTION_COMMON(0xf20) 676 addi r3,r1,STACK_FRAME_OVERHEAD 677 bl unknown_exception 678 b interrupt_return 679 680/* Debug exception as a critical interrupt*/ 681 START_EXCEPTION(debug_crit); 682 CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, 683 PROLOG_ADDITION_2REGS) 684 685 /* 686 * If there is a single step or branch-taken exception in an 687 * exception entry sequence, it was probably meant to apply to 688 * the code where the exception occurred (since exception entry 689 * doesn't turn off DE automatically). We simulate the effect 690 * of turning off DE on entry to an exception handler by turning 691 * off DE in the CSRR1 value and clearing the debug status. 692 */ 693 694 mfspr r14,SPRN_DBSR /* check single-step/branch taken */ 695 andis. r15,r14,(DBSR_IC|DBSR_BT)@h 696 beq+ 1f 697 698#ifdef CONFIG_RELOCATABLE 699 ld r15,PACATOC(r13) 700 ld r14,interrupt_base_book3e@got(r15) 701 ld r15,__end_interrupts@got(r15) 702 cmpld cr0,r10,r14 703 cmpld cr1,r10,r15 704#else 705 LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e) 706 cmpld cr0, r10, r14 707 LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts) 708 cmpld cr1, r10, r14 709#endif 710 blt+ cr0,1f 711 bge+ cr1,1f 712 713 /* here it looks like we got an inappropriate debug exception. */ 714 lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ 715 rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ 716 mtspr SPRN_DBSR,r14 717 mtspr SPRN_CSRR1,r11 718 lwz r10,PACA_EXCRIT+EX_CR(r13) /* restore registers */ 719 ld r1,PACA_EXCRIT+EX_R1(r13) 720 ld r14,PACA_EXCRIT+EX_R14(r13) 721 ld r15,PACA_EXCRIT+EX_R15(r13) 722 mtcr r10 723 ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ 724 ld r11,PACA_EXCRIT+EX_R11(r13) 725 mfspr r13,SPRN_SPRG_CRIT_SCRATCH 726 rfci 727 728 /* Normal debug exception */ 729 /* XXX We only handle coming from userspace for now since we can't 730 * quite save properly an interrupted kernel state yet 731 */ 7321: andi. r14,r11,MSR_PR; /* check for userspace again */ 733 beq kernel_dbg_exc; /* if from kernel mode */ 734 735 /* Now we mash up things to make it look like we are coming on a 736 * normal exception 737 */ 738 mfspr r14,SPRN_DBSR 739 std r14,_DSISR(r1) 740 ld r14,PACA_EXCRIT+EX_R14(r13) 741 ld r15,PACA_EXCRIT+EX_R15(r13) 742 EXCEPTION_COMMON_CRIT(0xd00) 743 addi r3,r1,STACK_FRAME_OVERHEAD 744 bl DebugException 745 REST_NVGPRS(r1) 746 b interrupt_return 747 748kernel_dbg_exc: 749 b . /* NYI */ 750 751/* Debug exception as a debug interrupt*/ 752 START_EXCEPTION(debug_debug); 753 DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, 754 PROLOG_ADDITION_2REGS) 755 756 /* 757 * If there is a single step or branch-taken exception in an 758 * exception entry sequence, it was probably meant to apply to 759 * the code where the exception occurred (since exception entry 760 * doesn't turn off DE automatically). We simulate the effect 761 * of turning off DE on entry to an exception handler by turning 762 * off DE in the DSRR1 value and clearing the debug status. 763 */ 764 765 mfspr r14,SPRN_DBSR /* check single-step/branch taken */ 766 andis. r15,r14,(DBSR_IC|DBSR_BT)@h 767 beq+ 1f 768 769#ifdef CONFIG_RELOCATABLE 770 ld r15,PACATOC(r13) 771 ld r14,interrupt_base_book3e@got(r15) 772 ld r15,__end_interrupts@got(r15) 773 cmpld cr0,r10,r14 774 cmpld cr1,r10,r15 775#else 776 LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e) 777 cmpld cr0, r10, r14 778 LOAD_REG_IMMEDIATE_SYM(r14, r15,__end_interrupts) 779 cmpld cr1, r10, r14 780#endif 781 blt+ cr0,1f 782 bge+ cr1,1f 783 784 /* here it looks like we got an inappropriate debug exception. */ 785 lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ 786 rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ 787 mtspr SPRN_DBSR,r14 788 mtspr SPRN_DSRR1,r11 789 lwz r10,PACA_EXDBG+EX_CR(r13) /* restore registers */ 790 ld r1,PACA_EXDBG+EX_R1(r13) 791 ld r14,PACA_EXDBG+EX_R14(r13) 792 ld r15,PACA_EXDBG+EX_R15(r13) 793 mtcr r10 794 ld r10,PACA_EXDBG+EX_R10(r13) /* restore registers */ 795 ld r11,PACA_EXDBG+EX_R11(r13) 796 mfspr r13,SPRN_SPRG_DBG_SCRATCH 797 rfdi 798 799 /* Normal debug exception */ 800 /* XXX We only handle coming from userspace for now since we can't 801 * quite save properly an interrupted kernel state yet 802 */ 8031: andi. r14,r11,MSR_PR; /* check for userspace again */ 804 beq kernel_dbg_exc; /* if from kernel mode */ 805 806 /* Now we mash up things to make it look like we are coming on a 807 * normal exception 808 */ 809 mfspr r14,SPRN_DBSR 810 std r14,_DSISR(r1) 811 ld r14,PACA_EXDBG+EX_R14(r13) 812 ld r15,PACA_EXDBG+EX_R15(r13) 813 EXCEPTION_COMMON_DBG(0xd08) 814 addi r3,r1,STACK_FRAME_OVERHEAD 815 bl DebugException 816 REST_NVGPRS(r1) 817 b interrupt_return 818 819 START_EXCEPTION(perfmon); 820 NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, 821 PROLOG_ADDITION_NONE) 822 EXCEPTION_COMMON(0x260) 823 CHECK_NAPPING() 824 addi r3,r1,STACK_FRAME_OVERHEAD 825 bl performance_monitor_exception 826 b interrupt_return 827 828/* Doorbell interrupt */ 829 MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, 830 doorbell, doorbell_exception, ACK_NONE) 831 832/* Doorbell critical Interrupt */ 833 START_EXCEPTION(doorbell_crit); 834 CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, 835 PROLOG_ADDITION_NONE) 836 EXCEPTION_COMMON_CRIT(0x2a0) 837 bl special_reg_save 838 CHECK_NAPPING(); 839 addi r3,r1,STACK_FRAME_OVERHEAD 840 bl unknown_nmi_exception 841 b ret_from_crit_except 842 843/* 844 * Guest doorbell interrupt 845 * This general exception use GSRRx save/restore registers 846 */ 847 START_EXCEPTION(guest_doorbell); 848 GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL, 849 PROLOG_ADDITION_NONE) 850 EXCEPTION_COMMON(0x2c0) 851 addi r3,r1,STACK_FRAME_OVERHEAD 852 bl unknown_exception 853 b interrupt_return 854 855/* Guest Doorbell critical Interrupt */ 856 START_EXCEPTION(guest_doorbell_crit); 857 CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, 858 PROLOG_ADDITION_NONE) 859 EXCEPTION_COMMON_CRIT(0x2e0) 860 bl special_reg_save 861 CHECK_NAPPING(); 862 addi r3,r1,STACK_FRAME_OVERHEAD 863 bl unknown_nmi_exception 864 b ret_from_crit_except 865 866/* Hypervisor call */ 867 START_EXCEPTION(hypercall); 868 NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL, 869 PROLOG_ADDITION_NONE) 870 EXCEPTION_COMMON(0x310) 871 addi r3,r1,STACK_FRAME_OVERHEAD 872 bl unknown_exception 873 b interrupt_return 874 875/* Embedded Hypervisor priviledged */ 876 START_EXCEPTION(ehpriv); 877 NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV, 878 PROLOG_ADDITION_NONE) 879 EXCEPTION_COMMON(0x320) 880 addi r3,r1,STACK_FRAME_OVERHEAD 881 bl unknown_exception 882 b interrupt_return 883 884/* LRAT Error interrupt */ 885 START_EXCEPTION(lrat_error); 886 NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR, 887 PROLOG_ADDITION_NONE) 888 EXCEPTION_COMMON(0x340) 889 addi r3,r1,STACK_FRAME_OVERHEAD 890 bl unknown_exception 891 b interrupt_return 892 893.macro SEARCH_RESTART_TABLE 894#ifdef CONFIG_RELOCATABLE 895 ld r11,PACATOC(r13) 896 ld r14,__start___restart_table@got(r11) 897 ld r15,__stop___restart_table@got(r11) 898#else 899 LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table) 900 LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table) 901#endif 902300: 903 cmpd r14,r15 904 beq 302f 905 ld r11,0(r14) 906 cmpld r10,r11 907 blt 301f 908 ld r11,8(r14) 909 cmpld r10,r11 910 bge 301f 911 ld r11,16(r14) 912 b 303f 913301: 914 addi r14,r14,24 915 b 300b 916302: 917 li r11,0 918303: 919.endm 920 921/* 922 * An interrupt came in while soft-disabled; We mark paca->irq_happened 923 * accordingly and if the interrupt is level sensitive, we hard disable 924 * hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so 925 * keep these in synch. 926 */ 927 928.macro masked_interrupt_book3e paca_irq full_mask 929 std r14,PACA_EXGEN+EX_R14(r13) 930 std r15,PACA_EXGEN+EX_R15(r13) 931 932 lbz r10,PACAIRQHAPPENED(r13) 933 .if \full_mask == 1 934 ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS 935 .else 936 ori r10,r10,\paca_irq 937 .endif 938 stb r10,PACAIRQHAPPENED(r13) 939 940 .if \full_mask == 1 941 xori r11,r11,MSR_EE /* clear MSR_EE */ 942 mtspr SPRN_SRR1,r11 943 .endif 944 945 mfspr r10,SPRN_SRR0 946 SEARCH_RESTART_TABLE 947 cmpdi r11,0 948 beq 1f 949 mtspr SPRN_SRR0,r11 /* return to restart address */ 9501: 951 952 lwz r11,PACA_EXGEN+EX_CR(r13) 953 mtcr r11 954 ld r10,PACA_EXGEN+EX_R10(r13) 955 ld r11,PACA_EXGEN+EX_R11(r13) 956 ld r14,PACA_EXGEN+EX_R14(r13) 957 ld r15,PACA_EXGEN+EX_R15(r13) 958 mfspr r13,SPRN_SPRG_GEN_SCRATCH 959 rfi 960 b . 961.endm 962 963masked_interrupt_book3e_0x500: 964 masked_interrupt_book3e PACA_IRQ_EE 1 965 966masked_interrupt_book3e_0x900: 967 ACK_DEC(r10); 968 masked_interrupt_book3e PACA_IRQ_DEC 0 969 970masked_interrupt_book3e_0x980: 971 ACK_FIT(r10); 972 masked_interrupt_book3e PACA_IRQ_DEC 0 973 974masked_interrupt_book3e_0x280: 975masked_interrupt_book3e_0x2c0: 976 masked_interrupt_book3e PACA_IRQ_DBELL 0 977 978/* 979 * This is called from 0x300 and 0x400 handlers after the prologs with 980 * r14 and r15 containing the fault address and error code, with the 981 * original values stashed away in the PACA 982 */ 983storage_fault_common: 984 addi r3,r1,STACK_FRAME_OVERHEAD 985 bl do_page_fault 986 b interrupt_return 987 988/* 989 * Alignment exception doesn't fit entirely in the 0x100 bytes so it 990 * continues here. 991 */ 992alignment_more: 993 addi r3,r1,STACK_FRAME_OVERHEAD 994 bl alignment_exception 995 REST_NVGPRS(r1) 996 b interrupt_return 997 998/* 999 * Trampolines used when spotting a bad kernel stack pointer in 1000 * the exception entry code. 1001 * 1002 * TODO: move some bits like SRR0 read to trampoline, pass PACA 1003 * index around, etc... to handle crit & mcheck 1004 */ 1005BAD_STACK_TRAMPOLINE(0x000) 1006BAD_STACK_TRAMPOLINE(0x100) 1007BAD_STACK_TRAMPOLINE(0x200) 1008BAD_STACK_TRAMPOLINE(0x220) 1009BAD_STACK_TRAMPOLINE(0x260) 1010BAD_STACK_TRAMPOLINE(0x280) 1011BAD_STACK_TRAMPOLINE(0x2a0) 1012BAD_STACK_TRAMPOLINE(0x2c0) 1013BAD_STACK_TRAMPOLINE(0x2e0) 1014BAD_STACK_TRAMPOLINE(0x300) 1015BAD_STACK_TRAMPOLINE(0x310) 1016BAD_STACK_TRAMPOLINE(0x320) 1017BAD_STACK_TRAMPOLINE(0x340) 1018BAD_STACK_TRAMPOLINE(0x400) 1019BAD_STACK_TRAMPOLINE(0x500) 1020BAD_STACK_TRAMPOLINE(0x600) 1021BAD_STACK_TRAMPOLINE(0x700) 1022BAD_STACK_TRAMPOLINE(0x800) 1023BAD_STACK_TRAMPOLINE(0x900) 1024BAD_STACK_TRAMPOLINE(0x980) 1025BAD_STACK_TRAMPOLINE(0x9f0) 1026BAD_STACK_TRAMPOLINE(0xa00) 1027BAD_STACK_TRAMPOLINE(0xb00) 1028BAD_STACK_TRAMPOLINE(0xc00) 1029BAD_STACK_TRAMPOLINE(0xd00) 1030BAD_STACK_TRAMPOLINE(0xd08) 1031BAD_STACK_TRAMPOLINE(0xe00) 1032BAD_STACK_TRAMPOLINE(0xf00) 1033BAD_STACK_TRAMPOLINE(0xf20) 1034 1035 .globl bad_stack_book3e 1036bad_stack_book3e: 1037 /* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */ 1038 mfspr r10,SPRN_SRR0; /* read SRR0 before touching stack */ 1039 ld r1,PACAEMERGSP(r13) 1040 subi r1,r1,64+INT_FRAME_SIZE 1041 std r10,_NIP(r1) 1042 std r11,_MSR(r1) 1043 ld r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */ 1044 lwz r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */ 1045 std r10,GPR1(r1) 1046 std r11,_CCR(r1) 1047 mfspr r10,SPRN_DEAR 1048 mfspr r11,SPRN_ESR 1049 std r10,_DEAR(r1) 1050 std r11,_ESR(r1) 1051 std r0,GPR0(r1); /* save r0 in stackframe */ \ 1052 std r2,GPR2(r1); /* save r2 in stackframe */ \ 1053 SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \ 1054 ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */ \ 1055 ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */ \ 1056 mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \ 1057 std r3,GPR10(r1); /* save r10 to stackframe */ \ 1058 std r4,GPR11(r1); /* save r11 to stackframe */ \ 1059 std r12,GPR12(r1); /* save r12 in stackframe */ \ 1060 std r5,GPR13(r1); /* save it to stackframe */ \ 1061 mflr r10 1062 mfctr r11 1063 mfxer r12 1064 std r10,_LINK(r1) 1065 std r11,_CTR(r1) 1066 std r12,_XER(r1) 1067 SAVE_GPRS(14, 31, r1) 1068 lhz r12,PACA_TRAP_SAVE(r13) 1069 std r12,_TRAP(r1) 1070 addi r11,r1,INT_FRAME_SIZE 1071 std r11,0(r1) 1072 li r12,0 1073 std r12,0(r11) 1074 ld r2,PACATOC(r13) 10751: addi r3,r1,STACK_FRAME_OVERHEAD 1076 bl kernel_bad_stack 1077 b 1b 1078 1079/* 1080 * Setup the initial TLB for a core. This current implementation 1081 * assume that whatever we are running off will not conflict with 1082 * the new mapping at PAGE_OFFSET. 1083 */ 1084_GLOBAL(initial_tlb_book3e) 1085 1086 /* Look for the first TLB with IPROT set */ 1087 mfspr r4,SPRN_TLB0CFG 1088 andi. r3,r4,TLBnCFG_IPROT 1089 lis r3,MAS0_TLBSEL(0)@h 1090 bne found_iprot 1091 1092 mfspr r4,SPRN_TLB1CFG 1093 andi. r3,r4,TLBnCFG_IPROT 1094 lis r3,MAS0_TLBSEL(1)@h 1095 bne found_iprot 1096 1097 mfspr r4,SPRN_TLB2CFG 1098 andi. r3,r4,TLBnCFG_IPROT 1099 lis r3,MAS0_TLBSEL(2)@h 1100 bne found_iprot 1101 1102 lis r3,MAS0_TLBSEL(3)@h 1103 mfspr r4,SPRN_TLB3CFG 1104 /* fall through */ 1105 1106found_iprot: 1107 andi. r5,r4,TLBnCFG_HES 1108 bne have_hes 1109 1110 mflr r8 /* save LR */ 1111/* 1. Find the index of the entry we're executing in 1112 * 1113 * r3 = MAS0_TLBSEL (for the iprot array) 1114 * r4 = SPRN_TLBnCFG 1115 */ 1116 bcl 20,31,$+4 /* Find our address */ 1117invstr: mflr r6 /* Make it accessible */ 1118 mfmsr r7 1119 rlwinm r5,r7,27,31,31 /* extract MSR[IS] */ 1120 mfspr r7,SPRN_PID 1121 slwi r7,r7,16 1122 or r7,r7,r5 1123 mtspr SPRN_MAS6,r7 1124 tlbsx 0,r6 /* search MSR[IS], SPID=PID */ 1125 1126 mfspr r3,SPRN_MAS0 1127 rlwinm r5,r3,16,20,31 /* Extract MAS0(Entry) */ 1128 1129 mfspr r7,SPRN_MAS1 /* Insure IPROT set */ 1130 oris r7,r7,MAS1_IPROT@h 1131 mtspr SPRN_MAS1,r7 1132 tlbwe 1133 1134/* 2. Invalidate all entries except the entry we're executing in 1135 * 1136 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in 1137 * r4 = SPRN_TLBnCFG 1138 * r5 = ESEL of entry we are running in 1139 */ 1140 andi. r4,r4,TLBnCFG_N_ENTRY /* Extract # entries */ 1141 li r6,0 /* Set Entry counter to 0 */ 11421: mr r7,r3 /* Set MAS0(TLBSEL) */ 1143 rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ 1144 mtspr SPRN_MAS0,r7 1145 tlbre 1146 mfspr r7,SPRN_MAS1 1147 rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ 1148 cmpw r5,r6 1149 beq skpinv /* Dont update the current execution TLB */ 1150 mtspr SPRN_MAS1,r7 1151 tlbwe 1152 isync 1153skpinv: addi r6,r6,1 /* Increment */ 1154 cmpw r6,r4 /* Are we done? */ 1155 bne 1b /* If not, repeat */ 1156 1157 /* Invalidate all TLBs */ 1158 PPC_TLBILX_ALL(0,R0) 1159 sync 1160 isync 1161 1162/* 3. Setup a temp mapping and jump to it 1163 * 1164 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in 1165 * r5 = ESEL of entry we are running in 1166 */ 1167 andi. r7,r5,0x1 /* Find an entry not used and is non-zero */ 1168 addi r7,r7,0x1 1169 mr r4,r3 /* Set MAS0(TLBSEL) = 1 */ 1170 mtspr SPRN_MAS0,r4 1171 tlbre 1172 1173 rlwimi r4,r7,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r7) */ 1174 mtspr SPRN_MAS0,r4 1175 1176 mfspr r7,SPRN_MAS1 1177 xori r6,r7,MAS1_TS /* Setup TMP mapping in the other Address space */ 1178 mtspr SPRN_MAS1,r6 1179 1180 tlbwe 1181 1182 mfmsr r6 1183 xori r6,r6,MSR_IS 1184 mtspr SPRN_SRR1,r6 1185 bcl 20,31,$+4 /* Find our address */ 11861: mflr r6 1187 addi r6,r6,(2f - 1b) 1188 mtspr SPRN_SRR0,r6 1189 rfi 11902: 1191 1192/* 4. Clear out PIDs & Search info 1193 * 1194 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in 1195 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping 1196 * r5 = MAS3 1197 */ 1198 li r6,0 1199 mtspr SPRN_MAS6,r6 1200 mtspr SPRN_PID,r6 1201 1202/* 5. Invalidate mapping we started in 1203 * 1204 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in 1205 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping 1206 * r5 = MAS3 1207 */ 1208 mtspr SPRN_MAS0,r3 1209 tlbre 1210 mfspr r6,SPRN_MAS1 1211 rlwinm r6,r6,0,2,31 /* clear IPROT and VALID */ 1212 mtspr SPRN_MAS1,r6 1213 tlbwe 1214 sync 1215 isync 1216 1217/* 6. Setup KERNELBASE mapping in TLB[0] 1218 * 1219 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in 1220 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping 1221 * r5 = MAS3 1222 */ 1223 rlwinm r3,r3,0,16,3 /* clear ESEL */ 1224 mtspr SPRN_MAS0,r3 1225 lis r6,(MAS1_VALID|MAS1_IPROT)@h 1226 ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l 1227 mtspr SPRN_MAS1,r6 1228 1229 LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED) 1230 mtspr SPRN_MAS2,r6 1231 1232 rlwinm r5,r5,0,0,25 1233 ori r5,r5,MAS3_SR | MAS3_SW | MAS3_SX 1234 mtspr SPRN_MAS3,r5 1235 li r5,-1 1236 rlwinm r5,r5,0,0,25 1237 1238 tlbwe 1239 1240/* 7. Jump to KERNELBASE mapping 1241 * 1242 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping 1243 */ 1244 /* Now we branch the new virtual address mapped by this entry */ 1245 bcl 20,31,$+4 /* Find our address */ 12461: mflr r6 1247 addi r6,r6,(2f - 1b) 1248 tovirt(r6,r6) 1249 lis r7,MSR_KERNEL@h 1250 ori r7,r7,MSR_KERNEL@l 1251 mtspr SPRN_SRR0,r6 1252 mtspr SPRN_SRR1,r7 1253 rfi /* start execution out of TLB1[0] entry */ 12542: 1255 1256/* 8. Clear out the temp mapping 1257 * 1258 * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in 1259 */ 1260 mtspr SPRN_MAS0,r4 1261 tlbre 1262 mfspr r5,SPRN_MAS1 1263 rlwinm r5,r5,0,2,31 /* clear IPROT and VALID */ 1264 mtspr SPRN_MAS1,r5 1265 tlbwe 1266 sync 1267 isync 1268 1269 /* We translate LR and return */ 1270 tovirt(r8,r8) 1271 mtlr r8 1272 blr 1273 1274have_hes: 1275 /* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the 1276 * kernel linear mapping. We also set MAS8 once for all here though 1277 * that will have to be made dependent on whether we are running under 1278 * a hypervisor I suppose. 1279 */ 1280 1281 /* BEWARE, MAGIC 1282 * This code is called as an ordinary function on the boot CPU. But to 1283 * avoid duplication, this code is also used in SCOM bringup of 1284 * secondary CPUs. We read the code between the initial_tlb_code_start 1285 * and initial_tlb_code_end labels one instruction at a time and RAM it 1286 * into the new core via SCOM. That doesn't process branches, so there 1287 * must be none between those two labels. It also means if this code 1288 * ever takes any parameters, the SCOM code must also be updated to 1289 * provide them. 1290 */ 1291 .globl a2_tlbinit_code_start 1292a2_tlbinit_code_start: 1293 1294 ori r11,r3,MAS0_WQ_ALLWAYS 1295 oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */ 1296 mtspr SPRN_MAS0,r11 1297 lis r3,(MAS1_VALID | MAS1_IPROT)@h 1298 ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT 1299 mtspr SPRN_MAS1,r3 1300 LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M) 1301 mtspr SPRN_MAS2,r3 1302 li r3,MAS3_SR | MAS3_SW | MAS3_SX 1303 mtspr SPRN_MAS7_MAS3,r3 1304 li r3,0 1305 mtspr SPRN_MAS8,r3 1306 1307 /* Write the TLB entry */ 1308 tlbwe 1309 1310 .globl a2_tlbinit_after_linear_map 1311a2_tlbinit_after_linear_map: 1312 1313 /* Now we branch the new virtual address mapped by this entry */ 1314#ifdef CONFIG_RELOCATABLE 1315 ld r5,PACATOC(r13) 1316 ld r3,1f@got(r5) 1317#else 1318 LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f) 1319#endif 1320 mtctr r3 1321 bctr 1322 13231: /* We are now running at PAGE_OFFSET, clean the TLB of everything 1324 * else (including IPROTed things left by firmware) 1325 * r4 = TLBnCFG 1326 * r3 = current address (more or less) 1327 */ 1328 1329 li r5,0 1330 mtspr SPRN_MAS6,r5 1331 tlbsx 0,r3 1332 1333 rlwinm r9,r4,0,TLBnCFG_N_ENTRY 1334 rlwinm r10,r4,8,0xff 1335 addi r10,r10,-1 /* Get inner loop mask */ 1336 1337 li r3,1 1338 1339 mfspr r5,SPRN_MAS1 1340 rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT)) 1341 1342 mfspr r6,SPRN_MAS2 1343 rldicr r6,r6,0,51 /* Extract EPN */ 1344 1345 mfspr r7,SPRN_MAS0 1346 rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */ 1347 1348 rlwinm r8,r7,16,0xfff /* Extract ESEL */ 1349 13502: add r4,r3,r8 1351 and r4,r4,r10 1352 1353 rlwimi r7,r4,16,MAS0_ESEL_MASK 1354 1355 mtspr SPRN_MAS0,r7 1356 mtspr SPRN_MAS1,r5 1357 mtspr SPRN_MAS2,r6 1358 tlbwe 1359 1360 addi r3,r3,1 1361 and. r4,r3,r10 1362 1363 bne 3f 1364 addis r6,r6,(1<<30)@h 13653: 1366 cmpw r3,r9 1367 blt 2b 1368 1369 .globl a2_tlbinit_after_iprot_flush 1370a2_tlbinit_after_iprot_flush: 1371 1372 PPC_TLBILX(0,0,R0) 1373 sync 1374 isync 1375 1376 .globl a2_tlbinit_code_end 1377a2_tlbinit_code_end: 1378 1379 /* We translate LR and return */ 1380 mflr r3 1381 tovirt(r3,r3) 1382 mtlr r3 1383 blr 1384 1385/* 1386 * Main entry (boot CPU, thread 0) 1387 * 1388 * We enter here from head_64.S, possibly after the prom_init trampoline 1389 * with r3 and r4 already saved to r31 and 30 respectively and in 64 bits 1390 * mode. Anything else is as it was left by the bootloader 1391 * 1392 * Initial requirements of this port: 1393 * 1394 * - Kernel loaded at 0 physical 1395 * - A good lump of memory mapped 0:0 by UTLB entry 0 1396 * - MSR:IS & MSR:DS set to 0 1397 * 1398 * Note that some of the above requirements will be relaxed in the future 1399 * as the kernel becomes smarter at dealing with different initial conditions 1400 * but for now you have to be careful 1401 */ 1402_GLOBAL(start_initialization_book3e) 1403 mflr r28 1404 1405 /* First, we need to setup some initial TLBs to map the kernel 1406 * text, data and bss at PAGE_OFFSET. We don't have a real mode 1407 * and always use AS 0, so we just set it up to match our link 1408 * address and never use 0 based addresses. 1409 */ 1410 bl initial_tlb_book3e 1411 1412 /* Init global core bits */ 1413 bl init_core_book3e 1414 1415 /* Init per-thread bits */ 1416 bl init_thread_book3e 1417 1418 /* Return to common init code */ 1419 tovirt(r28,r28) 1420 mtlr r28 1421 blr 1422 1423 1424/* 1425 * Secondary core/processor entry 1426 * 1427 * This is entered for thread 0 of a secondary core, all other threads 1428 * are expected to be stopped. It's similar to start_initialization_book3e 1429 * except that it's generally entered from the holding loop in head_64.S 1430 * after CPUs have been gathered by Open Firmware. 1431 * 1432 * We assume we are in 32 bits mode running with whatever TLB entry was 1433 * set for us by the firmware or POR engine. 1434 */ 1435_GLOBAL(book3e_secondary_core_init_tlb_set) 1436 li r4,1 1437 b generic_secondary_smp_init 1438 1439_GLOBAL(book3e_secondary_core_init) 1440 mflr r28 1441 1442 /* Do we need to setup initial TLB entry ? */ 1443 cmplwi r4,0 1444 bne 2f 1445 1446 /* Setup TLB for this core */ 1447 bl initial_tlb_book3e 1448 1449 /* We can return from the above running at a different 1450 * address, so recalculate r2 (TOC) 1451 */ 1452 bl relative_toc 1453 1454 /* Init global core bits */ 14552: bl init_core_book3e 1456 1457 /* Init per-thread bits */ 14583: bl init_thread_book3e 1459 1460 /* Return to common init code at proper virtual address. 1461 * 1462 * Due to various previous assumptions, we know we entered this 1463 * function at either the final PAGE_OFFSET mapping or using a 1464 * 1:1 mapping at 0, so we don't bother doing a complicated check 1465 * here, we just ensure the return address has the right top bits. 1466 * 1467 * Note that if we ever want to be smarter about where we can be 1468 * started from, we have to be careful that by the time we reach 1469 * the code below we may already be running at a different location 1470 * than the one we were called from since initial_tlb_book3e can 1471 * have moved us already. 1472 */ 1473 cmpdi cr0,r28,0 1474 blt 1f 1475 lis r3,PAGE_OFFSET@highest 1476 sldi r3,r3,32 1477 or r28,r28,r3 14781: mtlr r28 1479 blr 1480 1481_GLOBAL(book3e_secondary_thread_init) 1482 mflr r28 1483 b 3b 1484 1485 .globl init_core_book3e 1486init_core_book3e: 1487 /* Establish the interrupt vector base */ 1488 tovirt(r2,r2) 1489 LOAD_REG_ADDR(r3, interrupt_base_book3e) 1490 mtspr SPRN_IVPR,r3 1491 sync 1492 blr 1493 1494init_thread_book3e: 1495 lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h 1496 mtspr SPRN_EPCR,r3 1497 1498 /* Make sure interrupts are off */ 1499 wrteei 0 1500 1501 /* disable all timers and clear out status */ 1502 li r3,0 1503 mtspr SPRN_TCR,r3 1504 mfspr r3,SPRN_TSR 1505 mtspr SPRN_TSR,r3 1506 1507 blr 1508 1509_GLOBAL(__setup_base_ivors) 1510 SET_IVOR(0, 0x020) /* Critical Input */ 1511 SET_IVOR(1, 0x000) /* Machine Check */ 1512 SET_IVOR(2, 0x060) /* Data Storage */ 1513 SET_IVOR(3, 0x080) /* Instruction Storage */ 1514 SET_IVOR(4, 0x0a0) /* External Input */ 1515 SET_IVOR(5, 0x0c0) /* Alignment */ 1516 SET_IVOR(6, 0x0e0) /* Program */ 1517 SET_IVOR(7, 0x100) /* FP Unavailable */ 1518 SET_IVOR(8, 0x120) /* System Call */ 1519 SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */ 1520 SET_IVOR(10, 0x160) /* Decrementer */ 1521 SET_IVOR(11, 0x180) /* Fixed Interval Timer */ 1522 SET_IVOR(12, 0x1a0) /* Watchdog Timer */ 1523 SET_IVOR(13, 0x1c0) /* Data TLB Error */ 1524 SET_IVOR(14, 0x1e0) /* Instruction TLB Error */ 1525 SET_IVOR(15, 0x040) /* Debug */ 1526 1527 sync 1528 1529 blr 1530 1531_GLOBAL(setup_altivec_ivors) 1532 SET_IVOR(32, 0x200) /* AltiVec Unavailable */ 1533 SET_IVOR(33, 0x220) /* AltiVec Assist */ 1534 blr 1535 1536_GLOBAL(setup_perfmon_ivor) 1537 SET_IVOR(35, 0x260) /* Performance Monitor */ 1538 blr 1539 1540_GLOBAL(setup_doorbell_ivors) 1541 SET_IVOR(36, 0x280) /* Processor Doorbell */ 1542 SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */ 1543 blr 1544 1545_GLOBAL(setup_ehv_ivors) 1546 SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */ 1547 SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */ 1548 SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */ 1549 SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */ 1550 blr 1551 1552_GLOBAL(setup_lrat_ivor) 1553 SET_IVOR(42, 0x340) /* LRAT Error */ 1554 blr 1555