/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN   - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN   - virt, unreloc helpers (in practice, real can use)
 * EXC_COMMON         - After switching to virtual, relocated mode.
 */

#define EXC_REAL_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name)					\
	USE_TEXT_SECTION();					\
	.balign IFETCH_ALIGN_BYTES;				\
	.global name;						\
	_ASM_NOKPROBE_SYMBOL(name);				\
	DEFINE_FIXED_SYMBOL(name);				\
name:

#define TRAMP_REAL_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)				\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */ \
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label)				\
	ld	reg,PACAKBASE(r13);				\
	ori	reg,reg,(ABS_ADDR(label))@l

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)				\
	ld	reg,PACAKBASE(r13);				\
	ori	reg,reg,(ABS_ADDR(label))@l;			\
	addis	reg,reg,(ABS_ADDR(label))@h
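/*
 * Illustrative expansion (a sketch, not generated code): assuming a label
 * "foo" within 64K of kernelbase, LOAD_HANDLER(r10, foo) emits roughly:
 *
 *	ld	r10,PACAKBASE(r13)
 *	ori	r10,r10,(absolute address of foo)@l
 *
 * __LOAD_FAR_HANDLER additionally adds the high 16 bits with addis, so it
 * can reach labels beyond the first 64K at the cost of one more instruction.
 */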
/*
 * Branch to label using its 0xC000 address. This results in instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 * load KBASE for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)				\
	__LOAD_FAR_HANDLER(reg, label);				\
	mtctr	reg;						\
	bctr

/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define ISET_RI		.L_ISET_RI_\name\()	/* Run common code w/ MSR[RI]=1 */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

#define INT_DEFINE_BEGIN(n)					\
.macro int_define_ ## n name

#define INT_DEFINE_END(n)					\
.endm ;								\
int_define_ ## n n ;						\
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef ISET_RI
		ISET_RI=1
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
.endm
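/*
 * Example usage (hypothetical interrupt, for illustration only):
 *
 *	INT_DEFINE_BEGIN(example)
 *		IVEC=0x700
 *		IKVM_REAL=1
 *	INT_DEFINE_END(example)
 *
 * Any parameter not assigned between BEGIN and END takes the default set
 * by do_define_int above (e.g., IAREA=PACA_EXGEN, ISTACK=1, IKUAP=1).
 */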
/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle.
 */

.macro KVMTEST name handler
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	li	r10,(IVEC + 0x2)
	FTR_SECTION_ELSE
	li	r10,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	li	r10,(IVEC + 0x2)
	.else
	li	r10,(IVEC)
	.endif
	bne	\handler
#endif
.endm
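/*
 * For example, the hypervisor decrementer (IVEC=0x980, IHSRR=1, defined
 * below) would load r10 with trap number 0x982 before branching to the KVM
 * handler, while SRR-type interrupts pass their vector number unmodified.
 */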
/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */

.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 - r12 */
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm
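/*
 * State at the end of GEN_INT_ENTRY (see also the comment above GEN_COMMON
 * below): r13 points to the paca, the original r9-r13 are saved in the
 * IAREA save area, r9 holds the saved CR, and r11/r12 hold the saved
 * [H]SRR0/[H]SRR1.
 */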
/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name kvm_interrupt
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif
.endm

.macro __GEN_COMMON_BODY name
	.if IMASK
		.if ! ISTACK
		.error "No support for masked interrupt to use custom stack"
		.endif

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	3f

		/*
		 * Kernel code running below __end_soft_masked may be
		 * implicitly soft-masked if it is within the regions
		 * in the soft mask table.
		 */
		LOAD_HANDLER(r10, __end_soft_masked)
		cmpld	r11,r10
		bge+	1f

		/* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
		mtctr	r12
		stw	r9,PACA_EXGEN+EX_CCR(r13)
		SEARCH_SOFT_MASK_TABLE
		cmpdi	r12,0
		mfctr	r12		/* Restore r12 to SRR1 */
		lwz	r9,PACA_EXGEN+EX_CCR(r13)
		beq	1f		/* Not in soft-mask table */
		li	r10,IMASK
		b	2f		/* In soft-mask table, always mask */

		/* Test the soft mask state against our interrupt's bit */
1:		lbz	r10,PACAIRQSOFTMASK(r13)
2:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif
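	/*
	 * Fall through here when the interrupt is not to be masked. When
	 * IMASK is set, user-mode entries branched directly to the 3f label
	 * in the stack setup below.
	 */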
	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user */
3:	mr	r10,r1			/* Save r1 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack */
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use */
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace */
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe */
	std	r11,_NIP(r1)		/* save SRR0 in stackframe */
	std	r12,_MSR(r1)		/* save SRR1 in stackframe */
	std	r10,0(r1)		/* make stack chain pointer */
	std	r0,GPR0(r1)		/* save r0 in stackframe */
	std	r10,GPR1(r1)		/* save r1 in stackframe */

	/* Mark our [H]SRRs valid for return */
	li	r10,1
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	stb	r10,PACAHSRR_VALID(r13)
	FTR_SECTION_ELSE
	stb	r10,PACASRR_VALID(r13)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	stb	r10,PACAHSRR_VALID(r13)
	.else
	stb	r10,PACASRR_VALID(r13)
	.endif

	.if ISET_RI
	li	r10,MSR_RI
	mtmsrd	r10,1			/* Set MSR_RI */
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode */
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca */
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif

	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe */
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe */
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_CFAR(r13)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe */
	SAVE_GPRS(3, 8, r1)		/* save r3 - r8 in stackframe */
	mflr	r9			/* Get LR, later save to stack */
	ld	r2,PACATOC(r13)		/* get kernel TOC into r2 */
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe */
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number */
	li	r10,0
	ld	r11,exception_marker@toc(r2)
	std	r10,RESULT(r1)		/* clear regs->result */
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */
.endm
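/*
 * Typical use of the macros above (a sketch of the pattern followed by the
 * handlers later in this file, e.g. data_access):
 *
 *	EXC_REAL_BEGIN(name, vec, size)
 *		GEN_INT_ENTRY name, virt=0
 *	EXC_REAL_END(name, vec, size)
 *	...
 *	EXC_COMMON_BEGIN(name_common)
 *		GEN_COMMON name
 *		addi	r3,r1,STACK_FRAME_OVERHEAD
 *		bl	<C handler>
 *		b	interrupt_return_srr
 */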
/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	ld	r2,PACATOC(r13)
	LOAD_REG_ADDR(r9, __start___restart_table)
	LOAD_REG_ADDR(r10, __stop___restart_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	ld	r12,16(r9)
	b	303f
301:
	addi	r9,r9,24
	b	300b
302:
	li	r12,0
303:
.endm

.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	ld	r2,PACATOC(r13)
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	li	r12,1
	b	303f
301:
	addi	r9,r9,16
	b	300b
302:
	li	r12,0
303:
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	li	r10,0
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	stb	r10,PACAHSRR_VALID(r13)
	.else
	mtspr	SPRN_SRR1,r9
	stb	r10,PACASRR_VALID(r13)
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_GPRS(2, 13, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm
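/*
 * Result convention for the two table searches above, as implemented:
 * both take the address to look up in r11 and clobber r9/r10.
 * SEARCH_RESTART_TABLE leaves the entry's fixup address in r12 (0 if r11
 * is not covered); SEARCH_SOFT_MASK_TABLE leaves r12=1 if r11 falls in a
 * soft-masked region, else r12=0.
 */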
/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,	0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:
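/*
 * Note that placement is by the fixed-section macros rather than source
 * order: the first vectors emitted below are the scv entries at 0x3000 in
 * virt_vectors; the first real-mode vector, system reset at 0x100, is
 * defined further down.
 */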
/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before leaving the following inside-__end_soft_masked text, at least one
 * of the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1		/* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill)
	mtctr	r10
	bctr
#endif
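/*
 * Rationale: with CONFIG_RELOCATABLE the common handlers may sit beyond
 * direct-branch range of the fixed vectors (see the constraints comment
 * above), so these trampolines load the handler's absolute address and
 * use mtctr/bctr instead of a relative branch.
 */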

/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - A crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	/*
	 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
	 * being used, so a nested NMI exception would corrupt it.
	 */
	ISET_RI=0
	ISTACK=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
	 * to recover, but nested NMI will notice in_nmi and not recover
	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)
	li	r10,MSR_RI
	mtmsrd	r10,1

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1
	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL


/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area;
 * the difference is that re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
 *   but it has different priorities). Check to see if the CPU was in power
 *   save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with it. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	/*
	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
	 * nested machine check corrupts it. machine_check_common enables
	 * MSR_RI.
	 */
	ISET_RI=0
	ISTACK=0
	IDAR=1
	IDSISR=1
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	ISET_RI=0
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif
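/*
 * As with system reset, fwnmi-capable hypervisors deliver machine checks
 * to this alternate PAPR entry point; from here the handling is shared
 * with the 0x200 vector above.
 */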
#define MACHINE_CHECK_HANDLER_WINDUP					\
	/* Clear MSR_RI before setting SRR0 and SRR1. */		\
	li	r9,0;							\
	mtmsrd	r9,1;		/* Clear MSR_RI */			\
	/* Decrement paca->in_mce now RI is clear. */			\
	lhz	r12,PACA_IN_MCE(r13);					\
	subi	r12,r12,1;						\
	sth	r12,PACA_IN_MCE(r13);					\
	EXCEPTION_RESTORE_REGS

EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	li	r10,MSR_RI
	mtmsrd	r10,1

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvm_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	mce_deliver		/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */
	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */

	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL

mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check

	/* Enable MSR_RI when finished with PACA_EXMC */
	li	r10,MSR_RI
	mtmsrd	r10,1
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception_async
	b	interrupt_return_srr


#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif
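/*
 * Note on the wake-state test above: rlwinm extracts SRR1 bits 46:47,
 * the power-save wake state. A value below 2 means no kernel state was
 * lost and we return straight to the idle caller; otherwise GPRs were
 * lost and idle_return_gpr_loss restores them from the idle code's stack.
 */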
EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)

	/*
	 * Invoke machine_check_exception to print MCE event and panic.
	 * This is the NMI version of the handler because we are called from
	 * the early handler which is a true NMI.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.


/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_fault, which attempts to fill the HPT from an entry in the
 *   Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page table entries.
 *
 *   If no entry is found the Linux page fault handler is invoked (by
 *   do_hash_fault). Linux page faults can happen in kernel mode due to user
 *   copy operations of course.
 *
 *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
 *   MMU context, which may cause a DSI in the host, which must go to the
 *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
 *   always be used regardless of AIL setting.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	1f
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr

1:	bl	do_break
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values.
	 */
	REST_NVGPRS(r1)
	b	interrupt_return_srr
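/*
 * The BEGIN_MMU_FTR_SECTION block above is patched at boot by the feature
 * fixup machinery: radix kernels take the do_page_fault branch directly,
 * matching the Radix MMU case described in the 0x300 comment.
 */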

/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault missing SLB
 * entry for HPT, or an address outside RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 *   KVM: Same as 0x300, DSLB must test for KVM guest.
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IDAR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr

/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers. Guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
 *
 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
 * external interrupts are delivered as Hypervisor Virtualization Interrupts
 * rather than External Interrupts.
 *
 * Handling:
 * This calls into the Linux IRQ handler. NVGPRs are not saved to reduce
 * overhead, because registers at the time of the interrupt are not so
 * important as it is asynchronous.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 */
INT_DEFINE_BEGIN(hardware_interrupt)
	IVEC=0x500
	IHSRR_IF_HVMODE=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(hardware_interrupt)

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
	GEN_COMMON hardware_interrupt
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	BEGIN_FTR_SECTION
	b	interrupt_return_hsrr
	FTR_SECTION_ELSE
	b	interrupt_return_srr
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
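/*
 * The feature-alternative return above mirrors IHSRR_IF_HVMODE on entry:
 * HV mode took the interrupt with HSRRs, so it returns via
 * interrupt_return_hsrr; otherwise SRRs were used and the SRR return path
 * is taken.
 */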

/**
 * Interrupt 0x600 - Alignment Interrupt
 * This is a synchronous interrupt in response to a data alignment fault.
 */
INT_DEFINE_BEGIN(alignment)
	IVEC=0x600
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(alignment)

EXC_REAL_BEGIN(alignment, 0x600, 0x100)
	GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
	GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
	GEN_COMMON alignment
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_srr


/**
 * Interrupt 0x700 - Program Interrupt (program check).
 * This is a synchronous interrupt in response to various instruction faults:
 * traps, privilege errors, TM errors, floating point exceptions.
 *
 * Handling:
 * This interrupt may use the "emergency stack" in some cases when being taken
 * from kernel context, which complicates handling.
 */
INT_DEFINE_BEGIN(program_check)
	IVEC=0x700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(program_check)

EXC_REAL_BEGIN(program_check, 0x700, 0x100)

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	/*
	 * There's a short window during boot where although the kernel is
	 * running little endian, any exceptions will cause the CPU to switch
	 * back to big endian. For example a WARN() boils down to a trap
	 * instruction, which will cause a program check, and we end up here but
	 * with the CPU in big endian mode. The first instruction of the program
	 * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when
	 * executed in the wrong endian is an lhzu with a ~3GB displacement from
	 * r3. The content of r3 is random, so that is a load from some random
	 * location, and depending on the system can easily lead to a checkstop,
	 * or an infinitely recursive page fault.
	 *
	 * So to handle that case we have a trampoline here that can detect we
	 * are in the wrong endian and flip us back to the correct endian. We
	 * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires
	 * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as
	 * SPRG1 is already used for the paca. SPRG3 is user readable, but this
	 * trampoline is only active very early in boot, and SPRG3 will be
	 * reinitialised in vdso_getcpu_init() before userspace starts.
	 */
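	/*
	 * The .long words below are byte-swapped instruction encodings (the
	 * intended instructions are noted in the trailing comments). They
	 * execute as those instructions only when the CPU is in the wrong
	 * endian; in the correct endian the tdi is a no-op and the following
	 * branch skips the whole trampoline.
	 */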
BEGIN_FTR_SECTION
	tdi	0,0,0x48	// Trap never, or in reverse endian: b . + 8
	b	1f		// Skip trampoline if endian is correct
	.long	0xa643707d	// mtsprg	0, r11	Backup r11
	.long	0xa6027a7d	// mfsrr0	r11
	.long	0xa643727d	// mtsprg	2, r11	Backup SRR0 in SPRG2
	.long	0xa6027b7d	// mfsrr1	r11
	.long	0xa643737d	// mtsprg	3, r11	Backup SRR1 in SPRG3
	.long	0xa600607d	// mfmsr	r11
	.long	0x01006b69	// xori		r11, r11, 1  Invert MSR[LE]
	.long	0xa6037b7d	// mtsrr1	r11
	.long	0x34076039	// li		r11, 0x734
	.long	0xa6037a7d	// mtsrr0	r11
	.long	0x2400004c	// rfid
	mfsprg	r11, 3
	mtsrr1	r11		// Restore SRR1
	mfsprg	r11, 2
	mtsrr0	r11		// Restore SRR0
	mfsprg	r11, 0		// Restore r11
1:
END_FTR_SECTION(0, 1)		// nop out after boot
#endif /* CONFIG_CPU_LITTLE_ENDIAN */

	GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
	GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
EXC_COMMON_BEGIN(program_check_common)
	__GEN_COMMON_ENTRY program_check

	/*
	 * It's possible to receive a TM Bad Thing type program check with
	 * userspace register values (in particular r1), but with SRR1 reporting
	 * that we came from the kernel. Normally that would confuse the bad
	 * stack logic, and we would report a bad kernel stack pointer. Instead
	 * we switch to the emergency stack if we're taking a TM Bad Thing from
	 * the kernel.
	 */

	andi.	r10,r12,MSR_PR
	bne	.Lnormal_stack		/* If userspace, go normal path */

	andis.	r10,r12,(SRR1_PROGTM)@h
	bne	.Lemergency_stack	/* If TM, emergency */

	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace */
	blt	.Lnormal_stack		/* normal path if not */

	/* Use the emergency stack */
.Lemergency_stack:
	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label */
					/* 3 in EXCEPTION_PROLOG_COMMON */
	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	__ISTACK(program_check)=0
	__GEN_COMMON_BODY program_check
	b	.Ldo_program_check

.Lnormal_stack:
	__ISTACK(program_check)=1
	__GEN_COMMON_BODY program_check

.Ldo_program_check:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_srr
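/*
 * Design note: __ISTACK(program_check) is reassigned before each
 * __GEN_COMMON_BODY expansion above, so one expansion keeps the already
 * configured emergency stack (ISTACK=0) while the other performs the
 * normal kernel stack setup (ISTACK=1).
 */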
1724 */ 1725INT_DEFINE_BEGIN(fp_unavailable) 1726 IVEC=0x800 1727#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1728 IKVM_REAL=1 1729#endif 1730INT_DEFINE_END(fp_unavailable) 1731 1732EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100) 1733 GEN_INT_ENTRY fp_unavailable, virt=0 1734EXC_REAL_END(fp_unavailable, 0x800, 0x100) 1735EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100) 1736 GEN_INT_ENTRY fp_unavailable, virt=1 1737EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) 1738EXC_COMMON_BEGIN(fp_unavailable_common) 1739 GEN_COMMON fp_unavailable 1740 bne 1f /* if from user, just load it up */ 1741 addi r3,r1,STACK_FRAME_OVERHEAD 1742 bl kernel_fp_unavailable_exception 17430: trap 1744 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 17451: 1746#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1747BEGIN_FTR_SECTION 1748 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 1749 * transaction), go do TM stuff 1750 */ 1751 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 1752 bne- 2f 1753END_FTR_SECTION_IFSET(CPU_FTR_TM) 1754#endif 1755 bl load_up_fpu 1756 b fast_interrupt_return_srr 1757#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 17582: /* User process was in a transaction */ 1759 addi r3,r1,STACK_FRAME_OVERHEAD 1760 bl fp_unavailable_tm 1761 b interrupt_return_srr 1762#endif 1763 1764 1765/** 1766 * Interrupt 0x900 - Decrementer Interrupt. 1767 * This is an asynchronous interrupt in response to a decrementer exception 1768 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing 1769 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e., 1770 * local_irq_disable()). 1771 * 1772 * Handling: 1773 * This calls into Linux timer handler. NVGPRs are not saved (see 0x500). 1774 * 1775 * If soft masked, the masked handler will note the pending interrupt for 1776 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled 1777 * in the interrupted context. 1778 * If PPC_WATCHDOG is configured, the soft masked handler will actually set 1779 * things back up to run soft_nmi_interrupt as a regular interrupt handler 1780 * on the emergency stack. 1781 */ 1782INT_DEFINE_BEGIN(decrementer) 1783 IVEC=0x900 1784 IMASK=IRQS_DISABLED 1785#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1786 IKVM_REAL=1 1787#endif 1788INT_DEFINE_END(decrementer) 1789 1790EXC_REAL_BEGIN(decrementer, 0x900, 0x80) 1791 GEN_INT_ENTRY decrementer, virt=0 1792EXC_REAL_END(decrementer, 0x900, 0x80) 1793EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) 1794 GEN_INT_ENTRY decrementer, virt=1 1795EXC_VIRT_END(decrementer, 0x4900, 0x80) 1796EXC_COMMON_BEGIN(decrementer_common) 1797 GEN_COMMON decrementer 1798 addi r3,r1,STACK_FRAME_OVERHEAD 1799 bl timer_interrupt 1800 b interrupt_return_srr 1801 1802 1803/** 1804 * Interrupt 0x980 - Hypervisor Decrementer Interrupt. 1805 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC 1806 * register. 1807 * 1808 * Handling: 1809 * Linux does not use this outside KVM where it's used to keep a host timer 1810 * while the guest is given control of DEC. It should normally be caught by 1811 * the KVM test and routed there. 
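 *
 * The common code below is also unusual in that it runs entirely out of
 * the PACA save area and returns with HRFI_TO_KERNEL without setting up
 * a stack frame (ISTACK=0), because a stale HDEC may arrive in contexts
 * where the regular kernel stack cannot be assumed to be usable.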
1812 */ 1813INT_DEFINE_BEGIN(hdecrementer) 1814 IVEC=0x980 1815 IHSRR=1 1816 ISTACK=0 1817 IKVM_REAL=1 1818 IKVM_VIRT=1 1819INT_DEFINE_END(hdecrementer) 1820 1821EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) 1822 GEN_INT_ENTRY hdecrementer, virt=0 1823EXC_REAL_END(hdecrementer, 0x980, 0x80) 1824EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) 1825 GEN_INT_ENTRY hdecrementer, virt=1 1826EXC_VIRT_END(hdecrementer, 0x4980, 0x80) 1827EXC_COMMON_BEGIN(hdecrementer_common) 1828 __GEN_COMMON_ENTRY hdecrementer 1829 /* 1830 * Hypervisor decrementer interrupts not caught by the KVM test 1831 * shouldn't occur but are sometimes left pending on exit from a KVM 1832 * guest. We don't need to do anything to clear them, as they are 1833 * edge-triggered. 1834 * 1835 * Be careful to avoid touching the kernel stack. 1836 */ 1837 li r10,0 1838 stb r10,PACAHSRR_VALID(r13) 1839 ld r10,PACA_EXGEN+EX_CTR(r13) 1840 mtctr r10 1841 mtcrf 0x80,r9 1842 ld r9,PACA_EXGEN+EX_R9(r13) 1843 ld r10,PACA_EXGEN+EX_R10(r13) 1844 ld r11,PACA_EXGEN+EX_R11(r13) 1845 ld r12,PACA_EXGEN+EX_R12(r13) 1846 ld r13,PACA_EXGEN+EX_R13(r13) 1847 HRFI_TO_KERNEL 1848 1849 1850/** 1851 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt. 1852 * This is an asynchronous interrupt in response to a msgsndp doorbell. 1853 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 1854 * IRQS_DISABLED mask (i.e., local_irq_disable()). 1855 * 1856 * Handling: 1857 * Guests may use this for IPIs between threads in a core if the 1858 * hypervisor supports it. NVGPRS are not saved (see 0x500). 1859 * 1860 * If soft masked, the masked handler will note the pending interrupt for 1861 * replay, leaving MSR[EE] enabled in the interrupted context because the 1862 * doorbells are edge triggered. 1863 */ 1864INT_DEFINE_BEGIN(doorbell_super) 1865 IVEC=0xa00 1866 IMASK=IRQS_DISABLED 1867#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1868 IKVM_REAL=1 1869#endif 1870INT_DEFINE_END(doorbell_super) 1871 1872EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100) 1873 GEN_INT_ENTRY doorbell_super, virt=0 1874EXC_REAL_END(doorbell_super, 0xa00, 0x100) 1875EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) 1876 GEN_INT_ENTRY doorbell_super, virt=1 1877EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) 1878EXC_COMMON_BEGIN(doorbell_super_common) 1879 GEN_COMMON doorbell_super 1880 addi r3,r1,STACK_FRAME_OVERHEAD 1881#ifdef CONFIG_PPC_DOORBELL 1882 bl doorbell_exception 1883#else 1884 bl unknown_async_exception 1885#endif 1886 b interrupt_return_srr 1887 1888 1889EXC_REAL_NONE(0xb00, 0x100) 1890EXC_VIRT_NONE(0x4b00, 0x100) 1891 1892/** 1893 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall). 1894 * This is a synchronous interrupt invoked with the "sc" instruction. The 1895 * system call is invoked with "sc 0" and does not alter the HV bit, so it 1896 * is directed to the currently running OS. The hypercall is invoked with 1897 * "sc 1" and it sets HV=1, so it elevates to hypervisor. 1898 * 1899 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to 1900 * 0x4c00 virtual mode. 1901 * 1902 * Handling: 1903 * If the KVM test fires then it was due to a hypercall and is accordingly 1904 * routed to KVM. Otherwise this executes a normal Linux system call. 1905 * 1906 * Call convention: 1907 * 1908 * syscall and hypercalls register conventions are documented in 1909 * Documentation/powerpc/syscall64-abi.rst and 1910 * Documentation/powerpc/papr_hcalls.rst respectively. 
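 *
 * For illustration (this is not code from this file): a minimal "sc 0"
 * invocation from C, assuming the documented ABI (r0 = syscall number,
 * arguments in r3-r8, result in r3, failure flagged in cr0.SO), might
 * look like:
 *
 *	register long r0 asm("r0") = 20;	// __NR_getpid on powerpc
 *	register long r3 asm("r3") = 0;
 *	asm volatile("sc"
 *		     : "+r" (r3), "+r" (r0)
 *		     :
 *		     : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11",
 *		       "r12", "cr0", "ctr", "xer", "memory");
 *	// r3 now holds the pid, or an error code if cr0.SO was set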
1911 *
1912 * The intersection of volatile registers that don't contain possible
1913 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
1914 * without saving, though xer is not a good idea to use, as hardware may
1915 * interpret some bits so it may be costly to change them.
1916 */
1917INT_DEFINE_BEGIN(system_call)
1918 IVEC=0xc00
1919 IKVM_REAL=1
1920 IKVM_VIRT=1
1921INT_DEFINE_END(system_call)
1922
1923.macro SYSTEM_CALL virt
1924#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1925 /*
1926 * There is a little bit of juggling to get syscall and hcall
1927 * working well. Save r13 in ctr to avoid using SPRG scratch
1928 * register.
1929 *
1930 * Userspace syscalls have already saved the PPR, hcalls must save
1931 * it before setting HMT_MEDIUM.
1932 */
1933 mtctr r13
1934 GET_PACA(r13)
1935 std r10,PACA_EXGEN+EX_R10(r13)
1936 INTERRUPT_TO_KERNEL
1937 KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
1938 mfctr r9
1939#else
1940 mr r9,r13
1941 GET_PACA(r13)
1942 INTERRUPT_TO_KERNEL
1943#endif
1944
1945#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1946BEGIN_FTR_SECTION
1947 cmpdi r0,0x1ebe
1948 beq- 1f
1949END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
1950#endif
1951
1952 /* We reach here with PACA in r13, r13 in r9. */
1953 mfspr r11,SPRN_SRR0
1954 mfspr r12,SPRN_SRR1
1955
1956 HMT_MEDIUM
1957
1958 .if ! \virt
1959 __LOAD_HANDLER(r10, system_call_common_real)
1960 mtctr r10
1961 bctr
1962 .else
1963#ifdef CONFIG_RELOCATABLE
1964 __LOAD_HANDLER(r10, system_call_common)
1965 mtctr r10
1966 bctr
1967#else
1968 b system_call_common
1969#endif
1970 .endif
1971
1972#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1973 /* Fast LE/BE switch system call */
19741: mfspr r12,SPRN_SRR1
1975 xori r12,r12,MSR_LE
1976 mtspr SPRN_SRR1,r12
1977 mr r13,r9
1978 RFI_TO_USER /* return to userspace */
1979 b . /* prevent speculative execution */
1980#endif
1981.endm
1982
1983EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
1984 SYSTEM_CALL 0
1985EXC_REAL_END(system_call, 0xc00, 0x100)
1986EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
1987 SYSTEM_CALL 1
1988EXC_VIRT_END(system_call, 0x4c00, 0x100)
1989
1990#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1991TRAMP_REAL_BEGIN(kvm_hcall)
1992 std r9,PACA_EXGEN+EX_R9(r13)
1993 std r11,PACA_EXGEN+EX_R11(r13)
1994 std r12,PACA_EXGEN+EX_R12(r13)
1995 mfcr r9
1996 mfctr r10
1997 std r10,PACA_EXGEN+EX_R13(r13)
1998 li r10,0
1999 std r10,PACA_EXGEN+EX_CFAR(r13)
2000 std r10,PACA_EXGEN+EX_CTR(r13)
2001 /*
2002 * Save the PPR (on systems that support it) before changing to
2003 * HMT_MEDIUM. That allows the KVM code to save that value into the
2004 * guest state (it is the guest's PPR value).
2005 */
2006BEGIN_FTR_SECTION
2007 mfspr r10,SPRN_PPR
2008 std r10,PACA_EXGEN+EX_PPR(r13)
2009END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
2010
2011 HMT_MEDIUM
2012
2013#ifdef CONFIG_RELOCATABLE
2014 /*
2015 * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
2016 * outside the head section.
2017 */
2018 __LOAD_FAR_HANDLER(r10, kvmppc_hcall)
2019 mtctr r10
2020 bctr
2021#else
2022 b kvmppc_hcall
2023#endif
2024#endif
2025
2026/**
2027 * Interrupt 0xd00 - Trace Interrupt.
2028 * This is a synchronous interrupt in response to instruction step or
2029 * breakpoint faults.
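 *
 * Handling:
 * The common code below simply calls single_step_exception. For
 * reference, the usual way to provoke this interrupt is single-stepping
 * via ptrace, which sets MSR[SE] so the next instruction completes and
 * then traps here. A minimal userspace sketch, assuming a traced child
 * that is currently stopped:
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);	// run one instruction
 *	waitpid(pid, &status, 0);	// child stops again with SIGTRAP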
2030 */ 2031INT_DEFINE_BEGIN(single_step) 2032 IVEC=0xd00 2033#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2034 IKVM_REAL=1 2035#endif 2036INT_DEFINE_END(single_step) 2037 2038EXC_REAL_BEGIN(single_step, 0xd00, 0x100) 2039 GEN_INT_ENTRY single_step, virt=0 2040EXC_REAL_END(single_step, 0xd00, 0x100) 2041EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100) 2042 GEN_INT_ENTRY single_step, virt=1 2043EXC_VIRT_END(single_step, 0x4d00, 0x100) 2044EXC_COMMON_BEGIN(single_step_common) 2045 GEN_COMMON single_step 2046 addi r3,r1,STACK_FRAME_OVERHEAD 2047 bl single_step_exception 2048 b interrupt_return_srr 2049 2050 2051/** 2052 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI). 2053 * This is a synchronous interrupt in response to an MMU fault caused by a 2054 * guest data access. 2055 * 2056 * Handling: 2057 * This should always get routed to KVM. In radix MMU mode, this is caused 2058 * by a guest nested radix access that can't be performed due to the 2059 * partition scope page table. In hash mode, this can be caused by guests 2060 * running with translation disabled (virtual real mode) or with VPM enabled. 2061 * KVM will update the page table structures or disallow the access. 2062 */ 2063INT_DEFINE_BEGIN(h_data_storage) 2064 IVEC=0xe00 2065 IHSRR=1 2066 IDAR=1 2067 IDSISR=1 2068 IKVM_REAL=1 2069 IKVM_VIRT=1 2070INT_DEFINE_END(h_data_storage) 2071 2072EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20) 2073 GEN_INT_ENTRY h_data_storage, virt=0, ool=1 2074EXC_REAL_END(h_data_storage, 0xe00, 0x20) 2075EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20) 2076 GEN_INT_ENTRY h_data_storage, virt=1, ool=1 2077EXC_VIRT_END(h_data_storage, 0x4e00, 0x20) 2078EXC_COMMON_BEGIN(h_data_storage_common) 2079 GEN_COMMON h_data_storage 2080 addi r3,r1,STACK_FRAME_OVERHEAD 2081BEGIN_MMU_FTR_SECTION 2082 bl do_bad_page_fault_segv 2083MMU_FTR_SECTION_ELSE 2084 bl unknown_exception 2085ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) 2086 b interrupt_return_hsrr 2087 2088 2089/** 2090 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI). 2091 * This is a synchronous interrupt in response to an MMU fault caused by a 2092 * guest instruction fetch, similar to HDSI. 2093 */ 2094INT_DEFINE_BEGIN(h_instr_storage) 2095 IVEC=0xe20 2096 IHSRR=1 2097 IKVM_REAL=1 2098 IKVM_VIRT=1 2099INT_DEFINE_END(h_instr_storage) 2100 2101EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20) 2102 GEN_INT_ENTRY h_instr_storage, virt=0, ool=1 2103EXC_REAL_END(h_instr_storage, 0xe20, 0x20) 2104EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20) 2105 GEN_INT_ENTRY h_instr_storage, virt=1, ool=1 2106EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20) 2107EXC_COMMON_BEGIN(h_instr_storage_common) 2108 GEN_COMMON h_instr_storage 2109 addi r3,r1,STACK_FRAME_OVERHEAD 2110 bl unknown_exception 2111 b interrupt_return_hsrr 2112 2113 2114/** 2115 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt. 
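 * This is a synchronous HV interrupt, typically raised when the hardware
 * wants software to emulate an instruction it does not handle itself;
 * HSRR0 points at the faulting instruction.
 *
 * Handling:
 * The common code calls emulation_assist_interrupt, and restores NVGPRs
 * afterwards because the emulation may modify them.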
2116 */
2117INT_DEFINE_BEGIN(emulation_assist)
2118 IVEC=0xe40
2119 IHSRR=1
2120 IKVM_REAL=1
2121 IKVM_VIRT=1
2122INT_DEFINE_END(emulation_assist)
2123
2124EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
2125 GEN_INT_ENTRY emulation_assist, virt=0, ool=1
2126EXC_REAL_END(emulation_assist, 0xe40, 0x20)
2127EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
2128 GEN_INT_ENTRY emulation_assist, virt=1, ool=1
2129EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
2130EXC_COMMON_BEGIN(emulation_assist_common)
2131 GEN_COMMON emulation_assist
2132 addi r3,r1,STACK_FRAME_OVERHEAD
2133 bl emulation_assist_interrupt
2134 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2135 b interrupt_return_hsrr
2136
2137
2138/**
2139 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
2140 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
2141 * Exception. It is always taken in real mode but uses HSRR registers
2142 * unlike SRESET and MCE.
2143 *
2144 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
2145 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
2146 *
2147 * Handling:
2148 * This is a special case: it is handled similarly to machine checks, with an
2149 * initial real mode handler that is not soft-masked, which attempts to fix the
2150 * problem. Then a regular handler, which is soft-maskable, reports the
2151 * problem.
2152 *
2153 * The emergency stack is used for the early real mode handler.
2154 *
2155 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
2156 * either use soft-masking for the MCE, or use irq_work for the HMI.
2157 *
2158 * KVM:
2159 * Unlike MCE, this calls into KVM without calling the real mode handler
2160 * first.
2161 */
2162INT_DEFINE_BEGIN(hmi_exception_early)
2163 IVEC=0xe60
2164 IHSRR=1
2165 IREALMODE_COMMON=1
2166 ISTACK=0
2167 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
2168 IKVM_REAL=1
2169INT_DEFINE_END(hmi_exception_early)
2170
2171INT_DEFINE_BEGIN(hmi_exception)
2172 IVEC=0xe60
2173 IHSRR=1
2174 IMASK=IRQS_DISABLED
2175 IKVM_REAL=1
2176INT_DEFINE_END(hmi_exception)
2177
2178EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
2179 GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
2180EXC_REAL_END(hmi_exception, 0xe60, 0x20)
2181EXC_VIRT_NONE(0x4e60, 0x20)
2182
2183EXC_COMMON_BEGIN(hmi_exception_early_common)
2184 __GEN_REALMODE_COMMON_ENTRY hmi_exception_early
2185
2186 mr r10,r1 /* Save r1 */
2187 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
2188 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
2189
2190 __GEN_COMMON_BODY hmi_exception_early
2191
2192 addi r3,r1,STACK_FRAME_OVERHEAD
2193 bl hmi_exception_realmode
2194 cmpdi cr0,r3,0
2195 bne 1f
2196
2197 EXCEPTION_RESTORE_REGS hsrr=1
2198 HRFI_TO_USER_OR_KERNEL
2199
22001:
2201 /*
2202 * Go to virtual mode and pull the HMI event information from
2203 * firmware.
2204 */
2205 EXCEPTION_RESTORE_REGS hsrr=1
2206 GEN_INT_ENTRY hmi_exception, virt=0
2207
2208EXC_COMMON_BEGIN(hmi_exception_common)
2209 GEN_COMMON hmi_exception
2210 addi r3,r1,STACK_FRAME_OVERHEAD
2211 bl handle_hmi_exception
2212 b interrupt_return_hsrr
2213
2214
2215/**
2216 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
2217 * This is an asynchronous interrupt in response to a msgsnd doorbell.
2218 * Similar to the 0xa00 doorbell but for host rather than guest.
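 *
 * Handling:
 * The same doorbell_exception handler as 0xa00 runs (when
 * CONFIG_PPC_DOORBELL is set), but entry and exit use the HSRR
 * registers, hence interrupt_return_hsrr below.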
2219 */
2220INT_DEFINE_BEGIN(h_doorbell)
2221 IVEC=0xe80
2222 IHSRR=1
2223 IMASK=IRQS_DISABLED
2224 IKVM_REAL=1
2225 IKVM_VIRT=1
2226INT_DEFINE_END(h_doorbell)
2227
2228EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
2229 GEN_INT_ENTRY h_doorbell, virt=0, ool=1
2230EXC_REAL_END(h_doorbell, 0xe80, 0x20)
2231EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
2232 GEN_INT_ENTRY h_doorbell, virt=1, ool=1
2233EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
2234EXC_COMMON_BEGIN(h_doorbell_common)
2235 GEN_COMMON h_doorbell
2236 addi r3,r1,STACK_FRAME_OVERHEAD
2237#ifdef CONFIG_PPC_DOORBELL
2238 bl doorbell_exception
2239#else
2240 bl unknown_async_exception
2241#endif
2242 b interrupt_return_hsrr
2243
2244
2245/**
2246 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
2247 * This is an asynchronous interrupt in response to an "external exception".
2248 * Similar to 0x500 but for host only.
2249 */
2250INT_DEFINE_BEGIN(h_virt_irq)
2251 IVEC=0xea0
2252 IHSRR=1
2253 IMASK=IRQS_DISABLED
2254 IKVM_REAL=1
2255 IKVM_VIRT=1
2256INT_DEFINE_END(h_virt_irq)
2257
2258EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
2259 GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
2260EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
2261EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
2262 GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
2263EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
2264EXC_COMMON_BEGIN(h_virt_irq_common)
2265 GEN_COMMON h_virt_irq
2266 addi r3,r1,STACK_FRAME_OVERHEAD
2267 bl do_IRQ
2268 b interrupt_return_hsrr
2269
2270
2271EXC_REAL_NONE(0xec0, 0x20)
2272EXC_VIRT_NONE(0x4ec0, 0x20)
2273EXC_REAL_NONE(0xee0, 0x20)
2274EXC_VIRT_NONE(0x4ee0, 0x20)
2275
2276
2277/*
2278 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
2279 * This is an asynchronous interrupt in response to a PMU exception.
2280 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
2281 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
2282 *
2283 * Handling:
2284 * This calls into the perf subsystem.
2285 *
2286 * Like the watchdog soft-nmi, it appears as an NMI interrupt to Linux, in that
2287 * it runs under local_irq_disable. However, it may be soft-masked in
2288 * powerpc-specific code.
2289 *
2290 * If soft masked, the masked handler will note the pending interrupt for
2291 * replay, and clear MSR[EE] in the interrupted context.
2292 */
2293INT_DEFINE_BEGIN(performance_monitor)
2294 IVEC=0xf00
2295 IMASK=IRQS_PMI_DISABLED
2296#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2297 IKVM_REAL=1
2298#endif
2299INT_DEFINE_END(performance_monitor)
2300
2301EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
2302 GEN_INT_ENTRY performance_monitor, virt=0, ool=1
2303EXC_REAL_END(performance_monitor, 0xf00, 0x20)
2304EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
2305 GEN_INT_ENTRY performance_monitor, virt=1, ool=1
2306EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
2307EXC_COMMON_BEGIN(performance_monitor_common)
2308 GEN_COMMON performance_monitor
2309 addi r3,r1,STACK_FRAME_OVERHEAD
2310 bl performance_monitor_exception
2311 b interrupt_return_srr
2312
2313
2314/**
2315 * Interrupt 0xf20 - Vector Unavailable Interrupt.
2316 * This is a synchronous interrupt in response to
2317 * executing a vector (or altivec) instruction with MSR[VEC]=0.
2318 * Similar to FP unavailable.
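 *
 * Handling:
 * As with 0x800: if coming from userspace, load_up_altivec restores the
 * VMX state and sets MSR[VEC] so the instruction can be retried;
 * otherwise (or without CONFIG_ALTIVEC) the fault is reported via
 * altivec_unavailable_exception. The TM case mirrors the FP handling.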
2319 */ 2320INT_DEFINE_BEGIN(altivec_unavailable) 2321 IVEC=0xf20 2322#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2323 IKVM_REAL=1 2324#endif 2325INT_DEFINE_END(altivec_unavailable) 2326 2327EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20) 2328 GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1 2329EXC_REAL_END(altivec_unavailable, 0xf20, 0x20) 2330EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20) 2331 GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1 2332EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20) 2333EXC_COMMON_BEGIN(altivec_unavailable_common) 2334 GEN_COMMON altivec_unavailable 2335#ifdef CONFIG_ALTIVEC 2336BEGIN_FTR_SECTION 2337 beq 1f 2338#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2339 BEGIN_FTR_SECTION_NESTED(69) 2340 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2341 * transaction), go do TM stuff 2342 */ 2343 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2344 bne- 2f 2345 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2346#endif 2347 bl load_up_altivec 2348 b fast_interrupt_return_srr 2349#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 23502: /* User process was in a transaction */ 2351 addi r3,r1,STACK_FRAME_OVERHEAD 2352 bl altivec_unavailable_tm 2353 b interrupt_return_srr 2354#endif 23551: 2356END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2357#endif 2358 addi r3,r1,STACK_FRAME_OVERHEAD 2359 bl altivec_unavailable_exception 2360 b interrupt_return_srr 2361 2362 2363/** 2364 * Interrupt 0xf40 - VSX Unavailable Interrupt. 2365 * This is a synchronous interrupt in response to 2366 * executing a VSX instruction with MSR[VSX]=0. 2367 * Similar to FP unavailable. 2368 */ 2369INT_DEFINE_BEGIN(vsx_unavailable) 2370 IVEC=0xf40 2371#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2372 IKVM_REAL=1 2373#endif 2374INT_DEFINE_END(vsx_unavailable) 2375 2376EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20) 2377 GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1 2378EXC_REAL_END(vsx_unavailable, 0xf40, 0x20) 2379EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20) 2380 GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1 2381EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20) 2382EXC_COMMON_BEGIN(vsx_unavailable_common) 2383 GEN_COMMON vsx_unavailable 2384#ifdef CONFIG_VSX 2385BEGIN_FTR_SECTION 2386 beq 1f 2387#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2388 BEGIN_FTR_SECTION_NESTED(69) 2389 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2390 * transaction), go do TM stuff 2391 */ 2392 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2393 bne- 2f 2394 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2395#endif 2396 b load_up_vsx 2397#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 23982: /* User process was in a transaction */ 2399 addi r3,r1,STACK_FRAME_OVERHEAD 2400 bl vsx_unavailable_tm 2401 b interrupt_return_srr 2402#endif 24031: 2404END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2405#endif 2406 addi r3,r1,STACK_FRAME_OVERHEAD 2407 bl vsx_unavailable_exception 2408 b interrupt_return_srr 2409 2410 2411/** 2412 * Interrupt 0xf60 - Facility Unavailable Interrupt. 2413 * This is a synchronous interrupt in response to 2414 * executing an instruction without access to the facility that can be 2415 * resolved by the OS (e.g., FSCR, MSR). 2416 * Similar to FP unavailable. 
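 *
 * The FSCR "interruption cause" field identifies which facility
 * faulted. A sketch of the decode, modelled on the C handler in traps.c
 * (IC is the top byte, FSCR bits 0:7 in IBM numbering):
 *
 *	u64 fscr = mfspr(SPRN_FSCR);
 *	u8 cause = fscr >> 56;		// which facility faulted
 *	// the OS may then grant the facility, e.g. for the DSCR:
 *	// mtspr(SPRN_FSCR, fscr | FSCR_DSCR);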
2417 */
2418INT_DEFINE_BEGIN(facility_unavailable)
2419 IVEC=0xf60
2420#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2421 IKVM_REAL=1
2422#endif
2423INT_DEFINE_END(facility_unavailable)
2424
2425EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
2426 GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
2427EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
2428EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
2429 GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
2430EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
2431EXC_COMMON_BEGIN(facility_unavailable_common)
2432 GEN_COMMON facility_unavailable
2433 addi r3,r1,STACK_FRAME_OVERHEAD
2434 bl facility_unavailable_exception
2435 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2436 b interrupt_return_srr
2437
2438
2439/**
2440 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
2441 * This is a synchronous interrupt in response to
2442 * executing an instruction without access to the facility that can only
2443 * be resolved in HV mode (e.g., HFSCR).
2444 * Similar to FP unavailable.
2445 */
2446INT_DEFINE_BEGIN(h_facility_unavailable)
2447 IVEC=0xf80
2448 IHSRR=1
2449 IKVM_REAL=1
2450 IKVM_VIRT=1
2451INT_DEFINE_END(h_facility_unavailable)
2452
2453EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
2454 GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
2455EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
2456EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
2457 GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
2458EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
2459EXC_COMMON_BEGIN(h_facility_unavailable_common)
2460 GEN_COMMON h_facility_unavailable
2461 addi r3,r1,STACK_FRAME_OVERHEAD
2462 bl facility_unavailable_exception
2463 REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
2464 b interrupt_return_hsrr
2465
2466
2467EXC_REAL_NONE(0xfa0, 0x20)
2468EXC_VIRT_NONE(0x4fa0, 0x20)
2469EXC_REAL_NONE(0xfc0, 0x20)
2470EXC_VIRT_NONE(0x4fc0, 0x20)
2471EXC_REAL_NONE(0xfe0, 0x20)
2472EXC_VIRT_NONE(0x4fe0, 0x20)
2473
2474EXC_REAL_NONE(0x1000, 0x100)
2475EXC_VIRT_NONE(0x5000, 0x100)
2476EXC_REAL_NONE(0x1100, 0x100)
2477EXC_VIRT_NONE(0x5100, 0x100)
2478
2479#ifdef CONFIG_CBE_RAS
2480INT_DEFINE_BEGIN(cbe_system_error)
2481 IVEC=0x1200
2482 IHSRR=1
2483INT_DEFINE_END(cbe_system_error)
2484
2485EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
2486 GEN_INT_ENTRY cbe_system_error, virt=0
2487EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
2488EXC_VIRT_NONE(0x5200, 0x100)
2489EXC_COMMON_BEGIN(cbe_system_error_common)
2490 GEN_COMMON cbe_system_error
2491 addi r3,r1,STACK_FRAME_OVERHEAD
2492 bl cbe_system_error_exception
2493 b interrupt_return_hsrr
2494
2495#else /* CONFIG_CBE_RAS */
2496EXC_REAL_NONE(0x1200, 0x100)
2497EXC_VIRT_NONE(0x5200, 0x100)
2498#endif
2499
2500/**
2501 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
2502 * This was removed from the ISA before 2.01, which is the earliest
2503 * 64-bit BookS ISA supported; however, the G5 / 970 implements this
2504 * interrupt with a non-architected feature available through the support
2505 * processor interface.
2506 */ 2507INT_DEFINE_BEGIN(instruction_breakpoint) 2508 IVEC=0x1300 2509#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2510 IKVM_REAL=1 2511#endif 2512INT_DEFINE_END(instruction_breakpoint) 2513 2514EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100) 2515 GEN_INT_ENTRY instruction_breakpoint, virt=0 2516EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100) 2517EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100) 2518 GEN_INT_ENTRY instruction_breakpoint, virt=1 2519EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100) 2520EXC_COMMON_BEGIN(instruction_breakpoint_common) 2521 GEN_COMMON instruction_breakpoint 2522 addi r3,r1,STACK_FRAME_OVERHEAD 2523 bl instruction_breakpoint_exception 2524 b interrupt_return_srr 2525 2526 2527EXC_REAL_NONE(0x1400, 0x100) 2528EXC_VIRT_NONE(0x5400, 0x100) 2529 2530/** 2531 * Interrupt 0x1500 - Soft Patch Interrupt 2532 * 2533 * Handling: 2534 * This is an implementation specific interrupt which can be used for a 2535 * range of exceptions. 2536 * 2537 * This interrupt handler is unique in that it runs the denormal assist 2538 * code even for guests (and even in guest context) without going to KVM, 2539 * for speed. POWER9 does not raise denorm exceptions, so this special case 2540 * could be phased out in future to reduce special cases. 2541 */ 2542INT_DEFINE_BEGIN(denorm_exception) 2543 IVEC=0x1500 2544 IHSRR=1 2545 IBRANCH_TO_COMMON=0 2546 IKVM_REAL=1 2547INT_DEFINE_END(denorm_exception) 2548 2549EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100) 2550 GEN_INT_ENTRY denorm_exception, virt=0 2551#ifdef CONFIG_PPC_DENORMALISATION 2552 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2553 bne+ denorm_assist 2554#endif 2555 GEN_BRANCH_TO_COMMON denorm_exception, virt=0 2556EXC_REAL_END(denorm_exception, 0x1500, 0x100) 2557#ifdef CONFIG_PPC_DENORMALISATION 2558EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100) 2559 GEN_INT_ENTRY denorm_exception, virt=1 2560 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2561 bne+ denorm_assist 2562 GEN_BRANCH_TO_COMMON denorm_exception, virt=1 2563EXC_VIRT_END(denorm_exception, 0x5500, 0x100) 2564#else 2565EXC_VIRT_NONE(0x5500, 0x100) 2566#endif 2567 2568#ifdef CONFIG_PPC_DENORMALISATION 2569TRAMP_REAL_BEGIN(denorm_assist) 2570BEGIN_FTR_SECTION 2571/* 2572 * To denormalise we need to move a copy of the register to itself. 2573 * For POWER6 do that here for all FP regs. 2574 */ 2575 mfmsr r10 2576 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1) 2577 xori r10,r10,(MSR_FE0|MSR_FE1) 2578 mtmsrd r10 2579 sync 2580 2581 .Lreg=0 2582 .rept 32 2583 fmr .Lreg,.Lreg 2584 .Lreg=.Lreg+1 2585 .endr 2586 2587FTR_SECTION_ELSE 2588/* 2589 * To denormalise we need to move a copy of the register to itself. 2590 * For POWER7 do that here for the first 32 VSX registers only. 2591 */ 2592 mfmsr r10 2593 oris r10,r10,MSR_VSX@h 2594 mtmsrd r10 2595 sync 2596 2597 .Lreg=0 2598 .rept 32 2599 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2600 .Lreg=.Lreg+1 2601 .endr 2602 2603ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) 2604 2605BEGIN_FTR_SECTION 2606 b denorm_done 2607END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 2608/* 2609 * To denormalise we need to move a copy of the register to itself. 
2610 * For POWER8 we need to do that for all 64 VSX registers 2611 */ 2612 .Lreg=32 2613 .rept 32 2614 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2615 .Lreg=.Lreg+1 2616 .endr 2617 2618denorm_done: 2619 mfspr r11,SPRN_HSRR0 2620 subi r11,r11,4 2621 mtspr SPRN_HSRR0,r11 2622 mtcrf 0x80,r9 2623 ld r9,PACA_EXGEN+EX_R9(r13) 2624BEGIN_FTR_SECTION 2625 ld r10,PACA_EXGEN+EX_PPR(r13) 2626 mtspr SPRN_PPR,r10 2627END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 2628BEGIN_FTR_SECTION 2629 ld r10,PACA_EXGEN+EX_CFAR(r13) 2630 mtspr SPRN_CFAR,r10 2631END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 2632 li r10,0 2633 stb r10,PACAHSRR_VALID(r13) 2634 ld r10,PACA_EXGEN+EX_R10(r13) 2635 ld r11,PACA_EXGEN+EX_R11(r13) 2636 ld r12,PACA_EXGEN+EX_R12(r13) 2637 ld r13,PACA_EXGEN+EX_R13(r13) 2638 HRFI_TO_UNKNOWN 2639 b . 2640#endif 2641 2642EXC_COMMON_BEGIN(denorm_exception_common) 2643 GEN_COMMON denorm_exception 2644 addi r3,r1,STACK_FRAME_OVERHEAD 2645 bl unknown_exception 2646 b interrupt_return_hsrr 2647 2648 2649#ifdef CONFIG_CBE_RAS 2650INT_DEFINE_BEGIN(cbe_maintenance) 2651 IVEC=0x1600 2652 IHSRR=1 2653INT_DEFINE_END(cbe_maintenance) 2654 2655EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) 2656 GEN_INT_ENTRY cbe_maintenance, virt=0 2657EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) 2658EXC_VIRT_NONE(0x5600, 0x100) 2659EXC_COMMON_BEGIN(cbe_maintenance_common) 2660 GEN_COMMON cbe_maintenance 2661 addi r3,r1,STACK_FRAME_OVERHEAD 2662 bl cbe_maintenance_exception 2663 b interrupt_return_hsrr 2664 2665#else /* CONFIG_CBE_RAS */ 2666EXC_REAL_NONE(0x1600, 0x100) 2667EXC_VIRT_NONE(0x5600, 0x100) 2668#endif 2669 2670 2671INT_DEFINE_BEGIN(altivec_assist) 2672 IVEC=0x1700 2673#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2674 IKVM_REAL=1 2675#endif 2676INT_DEFINE_END(altivec_assist) 2677 2678EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100) 2679 GEN_INT_ENTRY altivec_assist, virt=0 2680EXC_REAL_END(altivec_assist, 0x1700, 0x100) 2681EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) 2682 GEN_INT_ENTRY altivec_assist, virt=1 2683EXC_VIRT_END(altivec_assist, 0x5700, 0x100) 2684EXC_COMMON_BEGIN(altivec_assist_common) 2685 GEN_COMMON altivec_assist 2686 addi r3,r1,STACK_FRAME_OVERHEAD 2687#ifdef CONFIG_ALTIVEC 2688 bl altivec_assist_exception 2689 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2690#else 2691 bl unknown_exception 2692#endif 2693 b interrupt_return_srr 2694 2695 2696#ifdef CONFIG_CBE_RAS 2697INT_DEFINE_BEGIN(cbe_thermal) 2698 IVEC=0x1800 2699 IHSRR=1 2700INT_DEFINE_END(cbe_thermal) 2701 2702EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) 2703 GEN_INT_ENTRY cbe_thermal, virt=0 2704EXC_REAL_END(cbe_thermal, 0x1800, 0x100) 2705EXC_VIRT_NONE(0x5800, 0x100) 2706EXC_COMMON_BEGIN(cbe_thermal_common) 2707 GEN_COMMON cbe_thermal 2708 addi r3,r1,STACK_FRAME_OVERHEAD 2709 bl cbe_thermal_exception 2710 b interrupt_return_hsrr 2711 2712#else /* CONFIG_CBE_RAS */ 2713EXC_REAL_NONE(0x1800, 0x100) 2714EXC_VIRT_NONE(0x5800, 0x100) 2715#endif 2716 2717 2718#ifdef CONFIG_PPC_WATCHDOG 2719 2720INT_DEFINE_BEGIN(soft_nmi) 2721 IVEC=0x900 2722 ISTACK=0 2723INT_DEFINE_END(soft_nmi) 2724 2725/* 2726 * Branch to soft_nmi_interrupt using the emergency stack. The emergency 2727 * stack is one that is usable by maskable interrupts so long as MSR_EE 2728 * remains off. It is used for recovery when something has corrupted the 2729 * normal kernel stack, for example. 
The "soft NMI" must not use the process 2730 * stack because we want irq disabled sections to avoid touching the stack 2731 * at all (other than PMU interrupts), so use the emergency stack for this, 2732 * and run it entirely with interrupts hard disabled. 2733 */ 2734EXC_COMMON_BEGIN(soft_nmi_common) 2735 mr r10,r1 2736 ld r1,PACAEMERGSP(r13) 2737 subi r1,r1,INT_FRAME_SIZE 2738 __GEN_COMMON_BODY soft_nmi 2739 2740 addi r3,r1,STACK_FRAME_OVERHEAD 2741 bl soft_nmi_interrupt 2742 2743 /* Clear MSR_RI before setting SRR0 and SRR1. */ 2744 li r9,0 2745 mtmsrd r9,1 2746 2747 kuap_kernel_restore r9, r10 2748 2749 EXCEPTION_RESTORE_REGS hsrr=0 2750 RFI_TO_KERNEL 2751 2752#endif /* CONFIG_PPC_WATCHDOG */ 2753 2754/* 2755 * An interrupt came in while soft-disabled. We set paca->irq_happened, then: 2756 * - If it was a decrementer interrupt, we bump the dec to max and and return. 2757 * - If it was a doorbell we return immediately since doorbells are edge 2758 * triggered and won't automatically refire. 2759 * - If it was a HMI we return immediately since we handled it in realmode 2760 * and it won't refire. 2761 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. 2762 * This is called with r10 containing the value to OR to the paca field. 2763 */ 2764.macro MASKED_INTERRUPT hsrr=0 2765 .if \hsrr 2766masked_Hinterrupt: 2767 .else 2768masked_interrupt: 2769 .endif 2770 stw r9,PACA_EXGEN+EX_CCR(r13) 2771 lbz r9,PACAIRQHAPPENED(r13) 2772 or r9,r9,r10 2773 stb r9,PACAIRQHAPPENED(r13) 2774 2775 .if ! \hsrr 2776 cmpwi r10,PACA_IRQ_DEC 2777 bne 1f 2778 LOAD_REG_IMMEDIATE(r9, 0x7fffffff) 2779 mtspr SPRN_DEC,r9 2780#ifdef CONFIG_PPC_WATCHDOG 2781 lwz r9,PACA_EXGEN+EX_CCR(r13) 2782 b soft_nmi_common 2783#else 2784 b 2f 2785#endif 2786 .endif 2787 27881: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK 2789 beq 2f 2790 xori r12,r12,MSR_EE /* clear MSR_EE */ 2791 .if \hsrr 2792 mtspr SPRN_HSRR1,r12 2793 .else 2794 mtspr SPRN_SRR1,r12 2795 .endif 2796 ori r9,r9,PACA_IRQ_HARD_DIS 2797 stb r9,PACAIRQHAPPENED(r13) 27982: /* done */ 2799 li r9,0 2800 .if \hsrr 2801 stb r9,PACAHSRR_VALID(r13) 2802 .else 2803 stb r9,PACASRR_VALID(r13) 2804 .endif 2805 2806 SEARCH_RESTART_TABLE 2807 cmpdi r12,0 2808 beq 3f 2809 .if \hsrr 2810 mtspr SPRN_HSRR0,r12 2811 .else 2812 mtspr SPRN_SRR0,r12 2813 .endif 28143: 2815 2816 ld r9,PACA_EXGEN+EX_CTR(r13) 2817 mtctr r9 2818 lwz r9,PACA_EXGEN+EX_CCR(r13) 2819 mtcrf 0x80,r9 2820 std r1,PACAR1(r13) 2821 ld r9,PACA_EXGEN+EX_R9(r13) 2822 ld r10,PACA_EXGEN+EX_R10(r13) 2823 ld r11,PACA_EXGEN+EX_R11(r13) 2824 ld r12,PACA_EXGEN+EX_R12(r13) 2825 ld r13,PACA_EXGEN+EX_R13(r13) 2826 /* May return to masked low address where r13 is not set up */ 2827 .if \hsrr 2828 HRFI_TO_KERNEL 2829 .else 2830 RFI_TO_KERNEL 2831 .endif 2832 b . 
2833.endm 2834 2835TRAMP_REAL_BEGIN(stf_barrier_fallback) 2836 std r9,PACA_EXRFI+EX_R9(r13) 2837 std r10,PACA_EXRFI+EX_R10(r13) 2838 sync 2839 ld r9,PACA_EXRFI+EX_R9(r13) 2840 ld r10,PACA_EXRFI+EX_R10(r13) 2841 ori 31,31,0 2842 .rept 14 2843 b 1f 28441: 2845 .endr 2846 blr 2847 2848/* Clobbers r10, r11, ctr */ 2849.macro L1D_DISPLACEMENT_FLUSH 2850 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2851 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2852 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 2853 mtctr r11 2854 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 2855 2856 /* order ld/st prior to dcbt stop all streams with flushing */ 2857 sync 2858 2859 /* 2860 * The load addresses are at staggered offsets within cachelines, 2861 * which suits some pipelines better (on others it should not 2862 * hurt). 2863 */ 28641: 2865 ld r11,(0x80 + 8)*0(r10) 2866 ld r11,(0x80 + 8)*1(r10) 2867 ld r11,(0x80 + 8)*2(r10) 2868 ld r11,(0x80 + 8)*3(r10) 2869 ld r11,(0x80 + 8)*4(r10) 2870 ld r11,(0x80 + 8)*5(r10) 2871 ld r11,(0x80 + 8)*6(r10) 2872 ld r11,(0x80 + 8)*7(r10) 2873 addi r10,r10,0x80*8 2874 bdnz 1b 2875.endm 2876 2877TRAMP_REAL_BEGIN(entry_flush_fallback) 2878 std r9,PACA_EXRFI+EX_R9(r13) 2879 std r10,PACA_EXRFI+EX_R10(r13) 2880 std r11,PACA_EXRFI+EX_R11(r13) 2881 mfctr r9 2882 L1D_DISPLACEMENT_FLUSH 2883 mtctr r9 2884 ld r9,PACA_EXRFI+EX_R9(r13) 2885 ld r10,PACA_EXRFI+EX_R10(r13) 2886 ld r11,PACA_EXRFI+EX_R11(r13) 2887 blr 2888 2889/* 2890 * The SCV entry flush happens with interrupts enabled, so it must disable 2891 * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10 2892 * (containing LR) does not need to be preserved here because scv entry 2893 * puts 0 in the pt_regs, CTR can be clobbered for the same reason. 2894 */ 2895TRAMP_REAL_BEGIN(scv_entry_flush_fallback) 2896 li r10,0 2897 mtmsrd r10,1 2898 lbz r10,PACAIRQHAPPENED(r13) 2899 ori r10,r10,PACA_IRQ_HARD_DIS 2900 stb r10,PACAIRQHAPPENED(r13) 2901 std r11,PACA_EXRFI+EX_R11(r13) 2902 L1D_DISPLACEMENT_FLUSH 2903 ld r11,PACA_EXRFI+EX_R11(r13) 2904 li r10,MSR_RI 2905 mtmsrd r10,1 2906 blr 2907 2908TRAMP_REAL_BEGIN(rfi_flush_fallback) 2909 SET_SCRATCH0(r13); 2910 GET_PACA(r13); 2911 std r1,PACA_EXRFI+EX_R12(r13) 2912 ld r1,PACAKSAVE(r13) 2913 std r9,PACA_EXRFI+EX_R9(r13) 2914 std r10,PACA_EXRFI+EX_R10(r13) 2915 std r11,PACA_EXRFI+EX_R11(r13) 2916 mfctr r9 2917 L1D_DISPLACEMENT_FLUSH 2918 mtctr r9 2919 ld r9,PACA_EXRFI+EX_R9(r13) 2920 ld r10,PACA_EXRFI+EX_R10(r13) 2921 ld r11,PACA_EXRFI+EX_R11(r13) 2922 ld r1,PACA_EXRFI+EX_R12(r13) 2923 GET_SCRATCH0(r13); 2924 rfid 2925 2926TRAMP_REAL_BEGIN(hrfi_flush_fallback) 2927 SET_SCRATCH0(r13); 2928 GET_PACA(r13); 2929 std r1,PACA_EXRFI+EX_R12(r13) 2930 ld r1,PACAKSAVE(r13) 2931 std r9,PACA_EXRFI+EX_R9(r13) 2932 std r10,PACA_EXRFI+EX_R10(r13) 2933 std r11,PACA_EXRFI+EX_R11(r13) 2934 mfctr r9 2935 L1D_DISPLACEMENT_FLUSH 2936 mtctr r9 2937 ld r9,PACA_EXRFI+EX_R9(r13) 2938 ld r10,PACA_EXRFI+EX_R10(r13) 2939 ld r11,PACA_EXRFI+EX_R11(r13) 2940 ld r1,PACA_EXRFI+EX_R12(r13) 2941 GET_SCRATCH0(r13); 2942 hrfid 2943 2944TRAMP_REAL_BEGIN(rfscv_flush_fallback) 2945 /* system call volatile */ 2946 mr r7,r13 2947 GET_PACA(r13); 2948 mr r8,r1 2949 ld r1,PACAKSAVE(r13) 2950 mfctr r9 2951 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2952 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2953 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 2954 mtctr r11 2955 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 2956 2957 /* order ld/st prior to dcbt stop all streams with flushing */ 2958 sync 2959 
2960 /*
2961 * The load addresses are at staggered offsets within cachelines,
2962 * which suits some pipelines better (on others it should not
2963 * hurt).
2964 */
29651:
2966 ld r11,(0x80 + 8)*0(r10)
2967 ld r11,(0x80 + 8)*1(r10)
2968 ld r11,(0x80 + 8)*2(r10)
2969 ld r11,(0x80 + 8)*3(r10)
2970 ld r11,(0x80 + 8)*4(r10)
2971 ld r11,(0x80 + 8)*5(r10)
2972 ld r11,(0x80 + 8)*6(r10)
2973 ld r11,(0x80 + 8)*7(r10)
2974 addi r10,r10,0x80*8
2975 bdnz 1b
2976
2977 mtctr r9
2978 li r9,0
2979 li r10,0
2980 li r11,0
2981 mr r1,r8
2982 mr r13,r7
2983 RFSCV
2984
2985USE_TEXT_SECTION()
2986
2987#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2988kvm_interrupt:
2989 /*
2990 * The conditional branch in KVMTEST can't reach all the way,
2991 * make a stub.
2992 */
2993 b kvmppc_interrupt
2994#endif
2995
2996_GLOBAL(do_uaccess_flush)
2997 UACCESS_FLUSH_FIXUP_SECTION
2998 nop
2999 nop
3000 nop
3001 blr
3002 L1D_DISPLACEMENT_FLUSH
3003 blr
3004_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
3005EXPORT_SYMBOL(do_uaccess_flush)
3006
3007
3008MASKED_INTERRUPT
3009MASKED_INTERRUPT hsrr=1
3010
3011 /*
3012 * Relocation-on interrupts: A subset of the interrupts can be delivered
3013 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
3014 * it. Addresses are the same as the original interrupt addresses, but
3015 * offset by 0xc000000000004000.
3016 * It's impossible to receive interrupts below 0x300 via this mechanism.
3017 * KVM: None of these traps are from the guest; anything that escalated
3018 * to HV=1 from HV=0 is delivered via real mode handlers.
3019 */
3020
3021 /*
3022 * This uses the standard macro, since the original 0x300 vector
3023 * only has extra guff for STAB-based processors -- which never
3024 * come here.
3025 */
3026
3027USE_FIXED_SECTION(virt_trampolines)
3028 /*
3029 * All code below __end_soft_masked is treated as soft-masked. If
3030 * any code runs here with MSR[EE]=1, it must then cope with a pending
3031 * soft interrupt being raised (i.e., by ensuring it is replayed).
3032 *
3033 * The __end_interrupts marker must be past the out-of-line (OOL)
3034 * handlers, so that they are copied to real address 0x100 when running
3035 * a relocatable kernel. This ensures they can be reached from the short
3036 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
3037 * directly, without using LOAD_HANDLER().
3038 */
3039 .align 7
3040 .globl __end_interrupts
3041__end_interrupts:
3042DEFINE_FIXED_SYMBOL(__end_interrupts)
3043
3044CLOSE_FIXED_SECTION(real_vectors);
3045CLOSE_FIXED_SECTION(real_trampolines);
3046CLOSE_FIXED_SECTION(virt_vectors);
3047CLOSE_FIXED_SECTION(virt_trampolines);
3048
3049USE_TEXT_SECTION()
3050
3051/* MSR[RI] should be clear because this uses SRR[01] */
3052enable_machine_check:
3053 mflr r0
3054 bcl 20,31,$+4
30550: mflr r3
3056 addi r3,r3,(1f - 0b)
3057 mtspr SPRN_SRR0,r3
3058 mfmsr r3
3059 ori r3,r3,MSR_ME
3060 mtspr SPRN_SRR1,r3
3061 RFI_TO_KERNEL
30621: mtlr r0
3063 blr
3064
3065/* MSR[RI] should be clear because this uses SRR[01] */
3066disable_machine_check:
3067 mflr r0
3068 bcl 20,31,$+4
30690: mflr r3
3070 addi r3,r3,(1f - 0b)
3071 mtspr SPRN_SRR0,r3
3072 mfmsr r3
3073 li r4,MSR_ME
3074 andc r3,r3,r4
3075 mtspr SPRN_SRR1,r3
3076 RFI_TO_KERNEL
30771: mtlr r0
3078 blr
3079
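/*
 * A note on the two helpers above: bcl 20,31,$+4 is the usual
 * link-register idiom for reading the current address in position-
 * independent code; the "branch" goes to the very next instruction and
 * deposits that address in LR, from which the address of label 1 is
 * computed for SRR0. The MSR[ME] change itself is staged through SRR1
 * and effected by RFI_TO_KERNEL, because a plain mtmsrd cannot be
 * relied upon to alter the ME bit.
 */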