/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/* PACA save area offsets (exgen, exmc, etc) */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_DAR		40
#define EX_DSISR	48
#define EX_CCR		52
#define EX_CFAR		56
#define EX_PPR		64
#define EX_CTR		72
.if EX_SIZE != 10
	.error "EX_SIZE is wrong"
.endif

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN   - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN   - virt, unreloc helpers (in practice, real can use)
 * EXC_COMMON         - After switching to virtual, relocated mode.
 */

#define EXC_REAL_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name)					\
	USE_TEXT_SECTION();					\
	.balign IFETCH_ALIGN_BYTES;				\
	.global name;						\
	_ASM_NOKPROBE_SYMBOL(name);				\
	DEFINE_FIXED_SYMBOL(name);				\
name:

#define TRAMP_REAL_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of a label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the
 * low part of the label. This requires that the label be within 64KB of
 * kernelbase, and that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)				\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */ \
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label)				\
	ld	reg,PACAKBASE(r13);				\
	ori	reg,reg,(ABS_ADDR(label))@l
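
/*
 * Illustration (not from the build): assuming a handler label placed within
 * 64K of kernelbase, LOAD_HANDLER(r10, label) expands to roughly
 *
 *	ld	r10,PACAKBASE(r13)	# r10 = kernel base (64K aligned)
 *	ori	r10,r10,<low 16 bits of the label's fixed address>
 *
 * ori can only reach 64K, which is where the alignment and placement
 * requirements above come from. __LOAD_FAR_HANDLER below adds an addis to
 * reach beyond that.
 */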

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)				\
	ld	reg,PACAKBASE(r13);				\
	ori	reg,reg,(ABS_ADDR(label))@l;			\
	addis	reg,reg,(ABS_ADDR(label))@h

/*
 * Branch to label using its 0xC000 address. This results in instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 * load KBASE for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)				\
	__LOAD_FAR_HANDLER(reg, label);				\
	mtctr	reg;						\
	bctr

/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define ISET_RI		.L_ISET_RI_\name\()	/* Run common code w/ MSR[RI]=1 */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_SKIP	.L_IKVM_SKIP_\name\()	/* Generate KVM skip handler */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IRECONCILE	.L_IRECONCILE_\name\()	/* Do RECONCILE_IRQ_STATE */
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

#define INT_DEFINE_BEGIN(n)					\
.macro int_define_ ## n name

#define INT_DEFINE_END(n)					\
.endm ;								\
int_define_ ## n n ;						\
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef ISET_RI
		ISET_RI=1
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_SKIP
		IKVM_SKIP=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IRECONCILE
		IRECONCILE=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
.endm
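
/*
 * Illustration (hypothetical, not from the build): a minimal definition
 * using the machinery above. Only IVEC is mandatory; every flag left unset
 * takes the default assigned by do_define_int:
 *
 *	INT_DEFINE_BEGIN(example_int)
 *		IVEC=0x300
 *		IDAR=1
 *		IDSISR=1
 *	INT_DEFINE_END(example_int)
 *
 * Real definitions appear below (e.g., data_access for 0x300).
 */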

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle. If both HV and PR KVM are possible, KVM interrupts go
 * first to kvmppc_interrupt_hv, which handles the PR guest case.
 */
#define kvmppc_interrupt kvmppc_interrupt_hv
#else
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif

.macro KVMTEST name
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	bne	\name\()_kvm
.endm

.macro GEN_KVM name
	.balign IFETCH_ALIGN_BYTES
\name\()_kvm:

	.if IKVM_SKIP
	cmpwi	r10,KVM_GUEST_MODE_SKIP
	beq	89f
	.else
BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_CFAR(r13)
	std	r10,HSTATE_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.endif

	ld	r10,IAREA+EX_CTR(r13)
	mtctr	r10
BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_PPR(r13)
	std	r10,HSTATE_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r11,IAREA+EX_R11(r13)
	ld	r12,IAREA+EX_R12(r13)
	std	r12,HSTATE_SCRATCH0(r13)
	sldi	r12,r9,32
	ld	r9,IAREA+EX_R9(r13)
	ld	r10,IAREA+EX_R10(r13)
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	ori	r12,r12,(IVEC + 0x2)
	FTR_SECTION_ELSE
	ori	r12,r12,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	ori	r12,r12,(IVEC + 0x2)
	.else
	ori	r12,r12,(IVEC)
	.endif
	b	kvmppc_interrupt

	.if IKVM_SKIP
89:	mtocrf	0x80,r9
	ld	r10,IAREA+EX_CTR(r13)
	mtctr	r10
	ld	r9,IAREA+EX_R9(r13)
	ld	r10,IAREA+EX_R10(r13)
	ld	r11,IAREA+EX_R11(r13)
	ld	r12,IAREA+EX_R12(r13)
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	b	kvmppc_skip_Hinterrupt
	FTR_SECTION_ELSE
	b	kvmppc_skip_interrupt
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	b	kvmppc_skip_Hinterrupt
	.else
	b	kvmppc_skip_interrupt
	.endif
	.endif
.endm

#else
.macro KVMTEST name
.endm
.macro GEN_KVM name
.endm
#endif
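
/*
 * Note on the r12 convention used by GEN_KVM above: kvmppc_interrupt
 * receives the saved CR in the top 32 bits of r12 and the trap number in
 * the bottom, e.g., for a 0x500 taken as an HSRR interrupt:
 *
 *	sldi	r12,r9,32	# r12 = CR << 32
 *	ori	r12,r12,0x502	# trap 0x500 | 0x2 (HSRR variant)
 */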

/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */

.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 - r12 */
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm
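
/*
 * Illustration (hypothetical invocation): with ool=1, GEN_INT_ENTRY emits
 * only the first few saves plus a branch at the fixed vector address and
 * assembles the rest of the prolog out of line in the trampoline section:
 *
 *	GEN_INT_ENTRY example_int, virt=0, ool=1
 *
 * This keeps the code at the hard-wired vector address small when the
 * vector's size budget is tight.
 */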

/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name
	.endif
.endm

.macro __GEN_COMMON_BODY name
	.if IMASK
		lbz	r10,PACAIRQSOFTMASK(r13)
		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
	mr	r10,r1			/* Save r1			*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace	*/
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	std	r11,_NIP(r1)		/* save SRR0 in stackframe	*/
	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/

	.if ISET_RI
	li	r10,MSR_RI
	mtmsrd	r10,1			/* Set MSR_RI */
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode		*/
	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca		*/
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif
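	/*
	 * Note: r9-r13 were stashed in the PACA save area by GEN_INT_ENTRY
	 * because the prolog runs without a stack. From here the pt_regs
	 * frame on r1 becomes the canonical copy, and the PACA area may be
	 * reused by the next interrupt.
	 */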
	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe	*/
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe	*/
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_CFAR(r13)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe	*/
	SAVE_4GPRS(3, r1)		/* save r3 - r6 in stackframe	*/
	SAVE_2GPRS(7, r1)		/* save r7, r8 in stackframe	*/
	mflr	r9			/* Get LR, later save to stack	*/
	ld	r2,PACATOC(r13)		/* get kernel TOC into r2	*/
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe	*/
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number		*/
	li	r10,0
	ld	r11,exception_marker@toc(r2)
	std	r10,RESULT(r1)		/* clear regs->result		*/
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame	*/

	.if ISTACK
	ACCOUNT_STOLEN_TIME
	.endif

	.if IRECONCILE
	RECONCILE_IRQ_STATE(r10, r11)
	.endif
.endm

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	.else
	mtspr	SPRN_SRR1,r9
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_8GPRS(2, r1)
	REST_4GPRS(10, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm
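
/*
 * Illustration: a handler returning from the stack frame ends with the pair
 *
 *	EXCEPTION_RESTORE_REGS
 *	RFI_TO_KERNEL
 *
 * as the system reset and machine check windups below do; hsrr=1 selects
 * HSRR0/1 instead of SRR0/1 for the return.
 */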

#define RUNLATCH_ON				\
BEGIN_FTR_SECTION				\
	ld	r3, PACA_THREAD_INFO(r13);	\
	ld	r4,TI_LOCAL_FLAGS(r3);		\
	andi.	r0,r4,_TLF_RUNLATCH;		\
	beql	ppc64_runlatch_on_trampoline;	\
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)

/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP				\
BEGIN_FTR_SECTION				\
	ld	r11, PACA_THREAD_INFO(r13);	\
	ld	r9,TI_LOCAL_FLAGS(r11);		\
	andi.	r10,r9,_TLF_NAPPING;		\
	bnel	power4_fixup_nap;		\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x3fff : Real mode trampolines
 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x4000)
OPEN_FIXED_SECTION(virt_vectors,	0x4000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - A crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	/*
	 * MSR_RI is not enabled, because PACA_EXNMI and the NMI stack are
	 * being used, so a nested NMI exception would corrupt them.
	 */
	ISET_RI=0
	ISTACK=0
	IRECONCILE=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
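	/*
	 * (The rlwinm above rotates SRR1 left by 16 so that bits 46:47, the
	 * wake-state field, land in the low two bits of r5: non-zero means a
	 * powersave wake, and system_reset_idle_wake below treats values
	 * under 2 as no state loss.)
	 */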
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	/* XXX: fwnmi guest could run a nested/PR guest, so why no test? */
	__IKVM_REAL(system_reset)=0
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
	 * to recover, but nested NMI will notice in_nmi and not recover
	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)
	li	r10,MSR_RI
	mtmsrd	r10,1

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset
	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing. We do not want to reconcile because that goes
	 * through irq tracing which we don't want in NMI.
	 *
	 * Save PACAIRQHAPPENED to RESULT (otherwise unused), and set HARD_DIS
	 * as we are running with MSR[EE]=0.
	 */
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	lbz	r10,PACAIRQHAPPENED(r13)
	std	r10,RESULT(r1)
	ori	r10,r10,PACA_IRQ_HARD_DIS
	stb	r10,PACAIRQHAPPENED(r13)

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	/*
	 * Restore soft mask settings.
	 */
	ld	r10,RESULT(r1)
	stb	r10,PACAIRQHAPPENED(r13)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

	kuap_restore_amr r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL

	GEN_KVM system_reset
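
/*
 * Note on paca->in_nmi above: it is incremented before MSR[RI] is set and
 * decremented only after RI is cleared again, so a nested NMI arriving in
 * that window sees in_nmi != 0 and knows the NMI stack and PACA_EXNMI save
 * area are live.
 */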


/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area;
 * the difference is that re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such
 *   issues, but it has different priorities). Check to see if the CPU was in
 *   power save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early
 *   handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with it. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	/*
	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
	 * nested machine check corrupts it. machine_check_common enables
	 * MSR_RI.
	 */
	ISET_RI=0
	ISTACK=0
	IDAR=1
	IDSISR=1
	IRECONCILE=0
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	ISET_RI=0
	IDAR=1
	IDSISR=1
	IKVM_SKIP=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
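
/*
 * Note: 0x200 has two definitions above. All entries funnel through
 * machine_check_early first, in real mode on the PACA_EXMC area; when the
 * event must be delivered as a normal interrupt, the early handler winds up
 * and re-enters through the machine_check definition (see mce_deliver below).
 */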

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif

#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r9,0;					\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Decrement paca->in_mce now RI is clear. */	\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	EXCEPTION_RESTORE_REGS

EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	li	r10,MSR_RI
	mtmsrd	r10,1

	/*
	 * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
	 * system_reset_common)
	 */
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	lbz	r10,PACAIRQHAPPENED(r13)
	std	r10,RESULT(r1)
	ori	r10,r10,PACA_IRQ_HARD_DIS
	stb	r10,PACAIRQHAPPENED(r13)

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

	/*
	 * Restore soft mask settings.
	 */
	ld	r10,RESULT(r1)
	stb	r10,PACAIRQHAPPENED(r13)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0		/* Check if coming from guest */
	bne	mce_deliver	/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */
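	/*
	 * Note: r12 still holds the SRR1 value captured at entry (reloaded
	 * from _MSR above), which is what the guest, user and recoverability
	 * tests here examine.
	 */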
	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */

	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL

mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check

	FINISH_NAP
	/* Enable MSR_RI when finished with PACA_EXMC */
	li	r10,MSR_RI
	mtmsrd	r10,1
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	interrupt_return

	GEN_KVM machine_check


#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif

EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)
	/* Invoke machine_check_exception to print MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.


/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_page first to see if the HPT can be filled from an entry in
 *   the Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page tables.
 *
 *   If none is found, do a Linux page fault. Linux page faults can happen in
 *   kernel mode due to user copy operations of course.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_SKIP=1
	IKVM_REAL=1
#endif
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	ld	r6,_MSR(r1)
	li	r3,0x300
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)

	GEN_KVM data_access
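
/*
 * Note: the MMU feature section in data_access_common above is resolved by
 * boot-time patching; on a radix MMU the do_hash_page path is patched out,
 * so a radix DSI falls straight through to handle_page_fault.
 */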

/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault missing SLB
 * entry for HPT, or an address outside RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 * A dedicated save area EXSLB is used (XXX: but it actually need not be
 * these days, we could use EXGEN).
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IAREA=PACA_EXSLB
	IRECONCILE=0
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_SKIP=1
	IKVM_REAL=1
#endif
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
	ld	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r4,_DAR(r1)
	ld	r5,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	interrupt_return

	GEN_KVM data_access_slb


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	ld	r6,_MSR(r1)
	li	r3,0x400
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)

	GEN_KVM instruction_access
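
/*
 * Note: IISIDE=1 in the instruction_access definition above (and in
 * instruction_access_slb below) makes __GEN_COMMON_BODY derive _DAR from
 * SRR0 (_NIP) and _DSISR from SRR1, because i-side faults report through
 * SRR0/1 rather than DAR/DSISR.
 */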

/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IAREA=PACA_EXSLB
	IRECONCILE=0
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
	ld	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r4,_DAR(r1)
	ld	r5,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	interrupt_return

	GEN_KVM instruction_access_slb


/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers; guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
 *
 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
 * external interrupts are delivered as Hypervisor Virtualization Interrupts
 * rather than External Interrupts.
 *
 * Handling:
 * This calls into the Linux IRQ handler. NVGPRs are not saved to reduce
 * overhead, because registers at the time of the interrupt are not so
 * important as it is asynchronous.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 */
INT_DEFINE_BEGIN(hardware_interrupt)
	IVEC=0x500
	IHSRR_IF_HVMODE=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(hardware_interrupt)

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
	GEN_COMMON hardware_interrupt
	FINISH_NAP
	RUNLATCH_ON
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	b	interrupt_return

	GEN_KVM hardware_interrupt
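
/*
 * Note: IHSRR_IF_HVMODE=1 in the 0x500 definition above makes both the
 * entry prolog and the masked branch select HSRR0/1 only when the kernel
 * owns MSR[HV] (CPU_FTR_HVMODE), matching the LPCR[LPES] behaviour
 * described in the comment.
 */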

/**
 * Interrupt 0x600 - Alignment Interrupt
 * This is a synchronous interrupt in response to a data alignment fault.
 */
INT_DEFINE_BEGIN(alignment)
	IVEC=0x600
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(alignment)

EXC_REAL_BEGIN(alignment, 0x600, 0x100)
	GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
	GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
	GEN_COMMON alignment
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return

	GEN_KVM alignment


/**
 * Interrupt 0x700 - Program Interrupt (program check).
 * This is a synchronous interrupt in response to various instruction faults:
 * traps, privilege errors, TM errors, floating point exceptions.
 *
 * Handling:
 * This interrupt may use the "emergency stack" in some cases when being taken
 * from kernel context, which complicates handling.
 */
INT_DEFINE_BEGIN(program_check)
	IVEC=0x700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(program_check)

EXC_REAL_BEGIN(program_check, 0x700, 0x100)
	GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
	GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
EXC_COMMON_BEGIN(program_check_common)
	__GEN_COMMON_ENTRY program_check

	/*
	 * It's possible to receive a TM Bad Thing type program check with
	 * userspace register values (in particular r1), but with SRR1 reporting
	 * that we came from the kernel. Normally that would confuse the bad
	 * stack logic, and we would report a bad kernel stack pointer. Instead
	 * we switch to the emergency stack if we're taking a TM Bad Thing from
	 * the kernel.
	 */

	andi.	r10,r12,MSR_PR
	bne	2f			/* If userspace, go normal path */

	andis.	r10,r12,(SRR1_PROGTM)@h
	bne	1f			/* If TM, emergency		*/

	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace	*/
	blt	2f			/* normal path if not		*/

	/* Use the emergency stack */
1:	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label	*/
					/* 3 in EXCEPTION_PROLOG_COMMON	*/
	mr	r10,r1			/* Save r1			*/
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	__ISTACK(program_check)=0
	__GEN_COMMON_BODY program_check
	b	3f
2:
	__ISTACK(program_check)=1
	__GEN_COMMON_BODY program_check
3:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return

	GEN_KVM program_check
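
/*
 * Note: program_check_common above expands __GEN_COMMON_BODY twice, once
 * with __ISTACK(program_check)=0 for the emergency-stack path (stack already
 * set up by hand) and once with =1 for the normal kernel-stack path.
 */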

/*
 * Interrupt 0x800 - Floating-Point Unavailable Interrupt.
 * This is a synchronous interrupt in response to executing an fp instruction
 * with MSR[FP]=0.
 *
 * Handling:
 * This will load FP registers and enable the FP bit if coming from userspace,
 * otherwise report a bad kernel use of FP.
 */
INT_DEFINE_BEGIN(fp_unavailable)
	IVEC=0x800
	IRECONCILE=0
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(fp_unavailable)

EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
	GEN_INT_ENTRY fp_unavailable, virt=0
EXC_REAL_END(fp_unavailable, 0x800, 0x100)
EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
	GEN_INT_ENTRY fp_unavailable, virt=1
EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
	GEN_COMMON fp_unavailable
	bne	1f			/* if from user, just load it up */
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_interrupt_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	interrupt_return
#endif

	GEN_KVM fp_unavailable


/**
 * Interrupt 0x900 - Decrementer Interrupt.
 * This is an asynchronous interrupt in response to a decrementer exception
 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
 * local_irq_disable()).
 *
 * Handling:
 * This calls into the Linux timer handler. NVGPRs are not saved (see 0x500).
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
 * in the interrupted context.
 * If PPC_WATCHDOG is configured, the soft masked handler will actually set
 * things back up to run soft_nmi_interrupt as a regular interrupt handler
 * on the emergency stack.
 */
INT_DEFINE_BEGIN(decrementer)
	IVEC=0x900
	IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(decrementer)

EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
	GEN_INT_ENTRY decrementer, virt=0
EXC_REAL_END(decrementer, 0x900, 0x80)
EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
	GEN_INT_ENTRY decrementer, virt=1
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
	GEN_COMMON decrementer
	FINISH_NAP
	RUNLATCH_ON
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	timer_interrupt
	b	interrupt_return

	GEN_KVM decrementer
1761 */ 1762INT_DEFINE_BEGIN(hdecrementer) 1763 IVEC=0x980 1764 IHSRR=1 1765 ISTACK=0 1766 IRECONCILE=0 1767 IKVM_REAL=1 1768 IKVM_VIRT=1 1769INT_DEFINE_END(hdecrementer) 1770 1771EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) 1772 GEN_INT_ENTRY hdecrementer, virt=0 1773EXC_REAL_END(hdecrementer, 0x980, 0x80) 1774EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) 1775 GEN_INT_ENTRY hdecrementer, virt=1 1776EXC_VIRT_END(hdecrementer, 0x4980, 0x80) 1777EXC_COMMON_BEGIN(hdecrementer_common) 1778 __GEN_COMMON_ENTRY hdecrementer 1779 /* 1780 * Hypervisor decrementer interrupts not caught by the KVM test 1781 * shouldn't occur but are sometimes left pending on exit from a KVM 1782 * guest. We don't need to do anything to clear them, as they are 1783 * edge-triggered. 1784 * 1785 * Be careful to avoid touching the kernel stack. 1786 */ 1787 ld r10,PACA_EXGEN+EX_CTR(r13) 1788 mtctr r10 1789 mtcrf 0x80,r9 1790 ld r9,PACA_EXGEN+EX_R9(r13) 1791 ld r10,PACA_EXGEN+EX_R10(r13) 1792 ld r11,PACA_EXGEN+EX_R11(r13) 1793 ld r12,PACA_EXGEN+EX_R12(r13) 1794 ld r13,PACA_EXGEN+EX_R13(r13) 1795 HRFI_TO_KERNEL 1796 1797 GEN_KVM hdecrementer 1798 1799 1800/** 1801 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt. 1802 * This is an asynchronous interrupt in response to a msgsndp doorbell. 1803 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 1804 * IRQS_DISABLED mask (i.e., local_irq_disable()). 1805 * 1806 * Handling: 1807 * Guests may use this for IPIs between threads in a core if the 1808 * hypervisor supports it. NVGPRS are not saved (see 0x500). 1809 * 1810 * If soft masked, the masked handler will note the pending interrupt for 1811 * replay, leaving MSR[EE] enabled in the interrupted context because the 1812 * doorbells are edge triggered. 1813 */ 1814INT_DEFINE_BEGIN(doorbell_super) 1815 IVEC=0xa00 1816 IMASK=IRQS_DISABLED 1817#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1818 IKVM_REAL=1 1819#endif 1820INT_DEFINE_END(doorbell_super) 1821 1822EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100) 1823 GEN_INT_ENTRY doorbell_super, virt=0 1824EXC_REAL_END(doorbell_super, 0xa00, 0x100) 1825EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) 1826 GEN_INT_ENTRY doorbell_super, virt=1 1827EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) 1828EXC_COMMON_BEGIN(doorbell_super_common) 1829 GEN_COMMON doorbell_super 1830 FINISH_NAP 1831 RUNLATCH_ON 1832 addi r3,r1,STACK_FRAME_OVERHEAD 1833#ifdef CONFIG_PPC_DOORBELL 1834 bl doorbell_exception 1835#else 1836 bl unknown_exception 1837#endif 1838 b interrupt_return 1839 1840 GEN_KVM doorbell_super 1841 1842 1843EXC_REAL_NONE(0xb00, 0x100) 1844EXC_VIRT_NONE(0x4b00, 0x100) 1845 1846/** 1847 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall). 1848 * This is a synchronous interrupt invoked with the "sc" instruction. The 1849 * system call is invoked with "sc 0" and does not alter the HV bit, so it 1850 * is directed to the currently running OS. The hypercall is invoked with 1851 * "sc 1" and it sets HV=1, so it elevates to hypervisor. 1852 * 1853 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to 1854 * 0x4c00 virtual mode. 1855 * 1856 * Handling: 1857 * If the KVM test fires then it was due to a hypercall and is accordingly 1858 * routed to KVM. Otherwise this executes a normal Linux system call. 1859 * 1860 * Call convention: 1861 * 1862 * syscall and hypercalls register conventions are documented in 1863 * Documentation/powerpc/syscall64-abi.rst and 1864 * Documentation/powerpc/papr_hcalls.rst respectively. 
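 *
 * As an illustration only (the ABI itself is specified by the documents
 * above: r0 holds the syscall number, r3-r8 the arguments), a userspace
 * getpid() reduces to:
 *
 *	li	r0,20		# __NR_getpid on powerpc
 *	sc	0		# takes this 0xc00 interrupt, result in r3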
1865 *
1866 * The intersection of volatile registers that don't contain possible
1867 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
1868 * without saving, though xer is not a good idea to use, as hardware may
1869 * interpret some bits so it may be costly to change them.
1870 */
1871INT_DEFINE_BEGIN(system_call)
1872 IVEC=0xc00
1873 IKVM_REAL=1
1874 IKVM_VIRT=1
1875INT_DEFINE_END(system_call)
1876
1877.macro SYSTEM_CALL virt
1878#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1879 /*
1880 * There is a little bit of juggling to get syscall and hcall
1881 * working well. Save r13 in ctr to avoid using SPRG scratch
1882 * register.
1883 *
1884 * Userspace syscalls have already saved the PPR, hcalls must save
1885 * it before setting HMT_MEDIUM.
1886 */
1887 mtctr r13
1888 GET_PACA(r13)
1889 std r10,PACA_EXGEN+EX_R10(r13)
1890 INTERRUPT_TO_KERNEL
1891 KVMTEST system_call /* uses r10, branch to system_call_kvm */
1892 mfctr r9
1893#else
1894 mr r9,r13
1895 GET_PACA(r13)
1896 INTERRUPT_TO_KERNEL
1897#endif
1898
1899#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1900BEGIN_FTR_SECTION
1901 cmpdi r0,0x1ebe
1902 beq- 1f
1903END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
1904#endif
1905
1906 /* We reach here with PACA in r13, r13 in r9. */
1907 mfspr r11,SPRN_SRR0
1908 mfspr r12,SPRN_SRR1
1909
1910 HMT_MEDIUM
1911
1912 .if ! \virt
1913 __LOAD_HANDLER(r10, system_call_common)
1914 mtspr SPRN_SRR0,r10
1915 ld r10,PACAKMSR(r13)
1916 mtspr SPRN_SRR1,r10
1917 RFI_TO_KERNEL
1918 b . /* prevent speculative execution */
1919 .else
1920 li r10,MSR_RI
1921 mtmsrd r10,1 /* Set RI (EE=0) */
1922#ifdef CONFIG_RELOCATABLE
1923 __LOAD_HANDLER(r10, system_call_common)
1924 mtctr r10
1925 bctr
1926#else
1927 b system_call_common
1928#endif
1929 .endif
1930
1931#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1932 /* Fast LE/BE switch system call */
19331: mfspr r12,SPRN_SRR1
1934 xori r12,r12,MSR_LE
1935 mtspr SPRN_SRR1,r12
1936 mr r13,r9
1937 RFI_TO_USER /* return to userspace */
1938 b . /* prevent speculative execution */
1939#endif
1940.endm
1941
1942EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
1943 SYSTEM_CALL 0
1944EXC_REAL_END(system_call, 0xc00, 0x100)
1945EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
1946 SYSTEM_CALL 1
1947EXC_VIRT_END(system_call, 0x4c00, 0x100)
1948
1949#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1950TRAMP_REAL_BEGIN(system_call_kvm)
1951 /*
1952 * This is an hcall, so register convention is as above, with these
1953 * differences:
1954 * r13 = PACA
1955 * ctr = orig r13
1956 * orig r10 saved in PACA
1957 */
1958 /*
1959 * Save the PPR (on systems that support it) before changing to
1960 * HMT_MEDIUM. That allows the KVM code to save that value into the
1961 * guest state (it is the guest's PPR value).
1962 */
1963BEGIN_FTR_SECTION
1964 mfspr r10,SPRN_PPR
1965 std r10,HSTATE_PPR(r13)
1966END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1967 HMT_MEDIUM
1968 mfctr r10
1969 SET_SCRATCH0(r10)
1970 mfcr r10
1971 std r12,HSTATE_SCRATCH0(r13)
1972 sldi r12,r10,32
1973 ori r12,r12,0xc00
1974#ifdef CONFIG_RELOCATABLE
1975 /*
1976 * Requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
1977 * outside the head section.
1978 */
1979 __LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
1980 mtctr r10
1981 ld r10,PACA_EXGEN+EX_R10(r13)
1982 bctr
1983#else
1984 ld r10,PACA_EXGEN+EX_R10(r13)
1985 b kvmppc_interrupt
1986#endif
1987#endif
1988
1989
1990/**
1991 * Interrupt 0xd00 - Trace Interrupt.
1992 * This is a synchronous interrupt in response to instruction step or
1993 * breakpoint faults.
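 *
 * For example, a debugger single-step (ptrace(PTRACE_SINGLESTEP, ...))
 * sets MSR[SE] on the traced thread; the next instruction completes, this
 * interrupt is taken, and single_step_exception() below turns it into a
 * SIGTRAP for the tracer.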
1994 */ 1995INT_DEFINE_BEGIN(single_step) 1996 IVEC=0xd00 1997#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1998 IKVM_REAL=1 1999#endif 2000INT_DEFINE_END(single_step) 2001 2002EXC_REAL_BEGIN(single_step, 0xd00, 0x100) 2003 GEN_INT_ENTRY single_step, virt=0 2004EXC_REAL_END(single_step, 0xd00, 0x100) 2005EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100) 2006 GEN_INT_ENTRY single_step, virt=1 2007EXC_VIRT_END(single_step, 0x4d00, 0x100) 2008EXC_COMMON_BEGIN(single_step_common) 2009 GEN_COMMON single_step 2010 addi r3,r1,STACK_FRAME_OVERHEAD 2011 bl single_step_exception 2012 b interrupt_return 2013 2014 GEN_KVM single_step 2015 2016 2017/** 2018 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI). 2019 * This is a synchronous interrupt in response to an MMU fault caused by a 2020 * guest data access. 2021 * 2022 * Handling: 2023 * This should always get routed to KVM. In radix MMU mode, this is caused 2024 * by a guest nested radix access that can't be performed due to the 2025 * partition scope page table. In hash mode, this can be caused by guests 2026 * running with translation disabled (virtual real mode) or with VPM enabled. 2027 * KVM will update the page table structures or disallow the access. 2028 */ 2029INT_DEFINE_BEGIN(h_data_storage) 2030 IVEC=0xe00 2031 IHSRR=1 2032 IDAR=1 2033 IDSISR=1 2034 IKVM_SKIP=1 2035 IKVM_REAL=1 2036 IKVM_VIRT=1 2037INT_DEFINE_END(h_data_storage) 2038 2039EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20) 2040 GEN_INT_ENTRY h_data_storage, virt=0, ool=1 2041EXC_REAL_END(h_data_storage, 0xe00, 0x20) 2042EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20) 2043 GEN_INT_ENTRY h_data_storage, virt=1, ool=1 2044EXC_VIRT_END(h_data_storage, 0x4e00, 0x20) 2045EXC_COMMON_BEGIN(h_data_storage_common) 2046 GEN_COMMON h_data_storage 2047 addi r3,r1,STACK_FRAME_OVERHEAD 2048BEGIN_MMU_FTR_SECTION 2049 ld r4,_DAR(r1) 2050 li r5,SIGSEGV 2051 bl bad_page_fault 2052MMU_FTR_SECTION_ELSE 2053 bl unknown_exception 2054ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) 2055 b interrupt_return 2056 2057 GEN_KVM h_data_storage 2058 2059 2060/** 2061 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI). 2062 * This is a synchronous interrupt in response to an MMU fault caused by a 2063 * guest instruction fetch, similar to HDSI. 2064 */ 2065INT_DEFINE_BEGIN(h_instr_storage) 2066 IVEC=0xe20 2067 IHSRR=1 2068 IKVM_REAL=1 2069 IKVM_VIRT=1 2070INT_DEFINE_END(h_instr_storage) 2071 2072EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20) 2073 GEN_INT_ENTRY h_instr_storage, virt=0, ool=1 2074EXC_REAL_END(h_instr_storage, 0xe20, 0x20) 2075EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20) 2076 GEN_INT_ENTRY h_instr_storage, virt=1, ool=1 2077EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20) 2078EXC_COMMON_BEGIN(h_instr_storage_common) 2079 GEN_COMMON h_instr_storage 2080 addi r3,r1,STACK_FRAME_OVERHEAD 2081 bl unknown_exception 2082 b interrupt_return 2083 2084 GEN_KVM h_instr_storage 2085 2086 2087/** 2088 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt. 
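 * This is a synchronous interrupt (HSRR) raised when the processor wants
 * the hypervisor to emulate an instruction, typically an attempt to
 * execute an illegal or unimplemented instruction; the precise causes are
 * implementation specific. emulation_assist_interrupt() attempts the
 * emulation, which is why the NVGPRs are restored afterwards.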
2089 */
2090INT_DEFINE_BEGIN(emulation_assist)
2091 IVEC=0xe40
2092 IHSRR=1
2093 IKVM_REAL=1
2094 IKVM_VIRT=1
2095INT_DEFINE_END(emulation_assist)
2096
2097EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
2098 GEN_INT_ENTRY emulation_assist, virt=0, ool=1
2099EXC_REAL_END(emulation_assist, 0xe40, 0x20)
2100EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
2101 GEN_INT_ENTRY emulation_assist, virt=1, ool=1
2102EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
2103EXC_COMMON_BEGIN(emulation_assist_common)
2104 GEN_COMMON emulation_assist
2105 addi r3,r1,STACK_FRAME_OVERHEAD
2106 bl emulation_assist_interrupt
2107 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2108 b interrupt_return
2109
2110 GEN_KVM emulation_assist
2111
2112
2113/**
2114 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
2115 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
2116 * Exception. It is always taken in real mode but uses HSRR registers
2117 * unlike SRESET and MCE.
2118 *
2119 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
2120 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
2121 *
2122 * Handling:
2123 * This is a special case: it is handled similarly to machine checks, with
2124 * an initial real mode handler that is not soft-masked and attempts to fix
2125 * the problem, followed by a regular handler which is soft-maskable and
2126 * reports the problem.
2127 *
2128 * The emergency stack is used for the early real mode handler.
2129 *
2130 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
2131 * either use soft-masking for the MCE, or use irq_work for the HMI.
2132 *
2133 * KVM:
2134 * Unlike MCE, this calls into KVM without calling the real mode handler
2135 * first.
2136 */
2137INT_DEFINE_BEGIN(hmi_exception_early)
2138 IVEC=0xe60
2139 IHSRR=1
2140 IREALMODE_COMMON=1
2141 ISTACK=0
2142 IRECONCILE=0
2143 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
2144 IKVM_REAL=1
2145INT_DEFINE_END(hmi_exception_early)
2146
2147INT_DEFINE_BEGIN(hmi_exception)
2148 IVEC=0xe60
2149 IHSRR=1
2150 IMASK=IRQS_DISABLED
2151 IKVM_REAL=1
2152INT_DEFINE_END(hmi_exception)
2153
2154EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
2155 GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
2156EXC_REAL_END(hmi_exception, 0xe60, 0x20)
2157EXC_VIRT_NONE(0x4e60, 0x20)
2158
2159EXC_COMMON_BEGIN(hmi_exception_early_common)
2160 __GEN_REALMODE_COMMON_ENTRY hmi_exception_early
2161
2162 mr r10,r1 /* Save r1 */
2163 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
2164 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
2165
2166 __GEN_COMMON_BODY hmi_exception_early
2167
2168 addi r3,r1,STACK_FRAME_OVERHEAD
2169 bl hmi_exception_realmode
2170 cmpdi cr0,r3,0
2171 bne 1f
2172
2173 EXCEPTION_RESTORE_REGS hsrr=1
2174 HRFI_TO_USER_OR_KERNEL
2175
21761:
2177 /*
2178 * Go to virtual mode and pull the HMI event information from
2179 * firmware.
2180 */
2181 EXCEPTION_RESTORE_REGS hsrr=1
2182 GEN_INT_ENTRY hmi_exception, virt=0
2183
2184 GEN_KVM hmi_exception_early
2185
2186EXC_COMMON_BEGIN(hmi_exception_common)
2187 GEN_COMMON hmi_exception
2188 FINISH_NAP
2189 RUNLATCH_ON
2190 addi r3,r1,STACK_FRAME_OVERHEAD
2191 bl handle_hmi_exception
2192 b interrupt_return
2193
2194 GEN_KVM hmi_exception
2195
2196
2197/**
2198 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
2199 * This is an asynchronous interrupt in response to a msgsnd doorbell.
2200 * Similar to the 0xa00 doorbell but for host rather than guest.
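 *
 * If soft masked, the masked handler notes the pending interrupt for
 * replay and returns, as with 0xa00: msgsnd doorbells are likewise edge
 * triggered, so there is nothing to quiesce (see MASKED_INTERRUPT).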
2201 */
2202INT_DEFINE_BEGIN(h_doorbell)
2203 IVEC=0xe80
2204 IHSRR=1
2205 IMASK=IRQS_DISABLED
2206 IKVM_REAL=1
2207 IKVM_VIRT=1
2208INT_DEFINE_END(h_doorbell)
2209
2210EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
2211 GEN_INT_ENTRY h_doorbell, virt=0, ool=1
2212EXC_REAL_END(h_doorbell, 0xe80, 0x20)
2213EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
2214 GEN_INT_ENTRY h_doorbell, virt=1, ool=1
2215EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
2216EXC_COMMON_BEGIN(h_doorbell_common)
2217 GEN_COMMON h_doorbell
2218 FINISH_NAP
2219 RUNLATCH_ON
2220 addi r3,r1,STACK_FRAME_OVERHEAD
2221#ifdef CONFIG_PPC_DOORBELL
2222 bl doorbell_exception
2223#else
2224 bl unknown_exception
2225#endif
2226 b interrupt_return
2227
2228 GEN_KVM h_doorbell
2229
2230
2231/**
2232 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
2233 * This is an asynchronous interrupt in response to an "external exception".
2234 * Similar to 0x500 but for host only.
2235 */
2236INT_DEFINE_BEGIN(h_virt_irq)
2237 IVEC=0xea0
2238 IHSRR=1
2239 IMASK=IRQS_DISABLED
2240 IKVM_REAL=1
2241 IKVM_VIRT=1
2242INT_DEFINE_END(h_virt_irq)
2243
2244EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
2245 GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
2246EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
2247EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
2248 GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
2249EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
2250EXC_COMMON_BEGIN(h_virt_irq_common)
2251 GEN_COMMON h_virt_irq
2252 FINISH_NAP
2253 RUNLATCH_ON
2254 addi r3,r1,STACK_FRAME_OVERHEAD
2255 bl do_IRQ
2256 b interrupt_return
2257
2258 GEN_KVM h_virt_irq
2259
2260
2261EXC_REAL_NONE(0xec0, 0x20)
2262EXC_VIRT_NONE(0x4ec0, 0x20)
2263EXC_REAL_NONE(0xee0, 0x20)
2264EXC_VIRT_NONE(0x4ee0, 0x20)
2265
2266
2267/**
2268 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
2269 * This is an asynchronous interrupt in response to a PMU exception.
2270 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
2271 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
2272 *
2273 * Handling:
2274 * This calls into the perf subsystem.
2275 *
2276 * Like the watchdog soft-nmi, it appears to Linux as an NMI interrupt, in
2277 * that it can be taken inside local_irq_disable() sections. However it may
2278 * be soft-masked in powerpc-specific code.
2279 *
2280 * If soft masked, the masked handler will note the pending interrupt for
2281 * replay, and clear MSR[EE] in the interrupted context.
2282 */
2283INT_DEFINE_BEGIN(performance_monitor)
2284 IVEC=0xf00
2285 IMASK=IRQS_PMI_DISABLED
2286#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2287 IKVM_REAL=1
2288#endif
2289INT_DEFINE_END(performance_monitor)
2290
2291EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
2292 GEN_INT_ENTRY performance_monitor, virt=0, ool=1
2293EXC_REAL_END(performance_monitor, 0xf00, 0x20)
2294EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
2295 GEN_INT_ENTRY performance_monitor, virt=1, ool=1
2296EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
2297EXC_COMMON_BEGIN(performance_monitor_common)
2298 GEN_COMMON performance_monitor
2299 FINISH_NAP
2300 RUNLATCH_ON
2301 addi r3,r1,STACK_FRAME_OVERHEAD
2302 bl performance_monitor_exception
2303 b interrupt_return
2304
2305 GEN_KVM performance_monitor
2306
2307
2308/**
2309 * Interrupt 0xf20 - Vector Unavailable Interrupt.
2310 * This is a synchronous interrupt in response to
2311 * executing a vector (or altivec) instruction with MSR[VEC]=0.
2312 * Similar to FP unavailable.
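 *
 * The dispatch below mirrors fp_unavailable above: load_up_altivec for
 * plain userspace use, altivec_unavailable_tm inside a transaction,
 * otherwise altivec_unavailable_exception.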
2313 */ 2314INT_DEFINE_BEGIN(altivec_unavailable) 2315 IVEC=0xf20 2316 IRECONCILE=0 2317#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2318 IKVM_REAL=1 2319#endif 2320INT_DEFINE_END(altivec_unavailable) 2321 2322EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20) 2323 GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1 2324EXC_REAL_END(altivec_unavailable, 0xf20, 0x20) 2325EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20) 2326 GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1 2327EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20) 2328EXC_COMMON_BEGIN(altivec_unavailable_common) 2329 GEN_COMMON altivec_unavailable 2330#ifdef CONFIG_ALTIVEC 2331BEGIN_FTR_SECTION 2332 beq 1f 2333#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2334 BEGIN_FTR_SECTION_NESTED(69) 2335 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2336 * transaction), go do TM stuff 2337 */ 2338 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2339 bne- 2f 2340 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2341#endif 2342 bl load_up_altivec 2343 b fast_interrupt_return 2344#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 23452: /* User process was in a transaction */ 2346 RECONCILE_IRQ_STATE(r10, r11) 2347 addi r3,r1,STACK_FRAME_OVERHEAD 2348 bl altivec_unavailable_tm 2349 b interrupt_return 2350#endif 23511: 2352END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2353#endif 2354 RECONCILE_IRQ_STATE(r10, r11) 2355 addi r3,r1,STACK_FRAME_OVERHEAD 2356 bl altivec_unavailable_exception 2357 b interrupt_return 2358 2359 GEN_KVM altivec_unavailable 2360 2361 2362/** 2363 * Interrupt 0xf40 - VSX Unavailable Interrupt. 2364 * This is a synchronous interrupt in response to 2365 * executing a VSX instruction with MSR[VSX]=0. 2366 * Similar to FP unavailable. 2367 */ 2368INT_DEFINE_BEGIN(vsx_unavailable) 2369 IVEC=0xf40 2370 IRECONCILE=0 2371#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2372 IKVM_REAL=1 2373#endif 2374INT_DEFINE_END(vsx_unavailable) 2375 2376EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20) 2377 GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1 2378EXC_REAL_END(vsx_unavailable, 0xf40, 0x20) 2379EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20) 2380 GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1 2381EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20) 2382EXC_COMMON_BEGIN(vsx_unavailable_common) 2383 GEN_COMMON vsx_unavailable 2384#ifdef CONFIG_VSX 2385BEGIN_FTR_SECTION 2386 beq 1f 2387#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2388 BEGIN_FTR_SECTION_NESTED(69) 2389 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2390 * transaction), go do TM stuff 2391 */ 2392 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2393 bne- 2f 2394 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2395#endif 2396 b load_up_vsx 2397#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 23982: /* User process was in a transaction */ 2399 RECONCILE_IRQ_STATE(r10, r11) 2400 addi r3,r1,STACK_FRAME_OVERHEAD 2401 bl vsx_unavailable_tm 2402 b interrupt_return 2403#endif 24041: 2405END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2406#endif 2407 RECONCILE_IRQ_STATE(r10, r11) 2408 addi r3,r1,STACK_FRAME_OVERHEAD 2409 bl vsx_unavailable_exception 2410 b interrupt_return 2411 2412 GEN_KVM vsx_unavailable 2413 2414 2415/** 2416 * Interrupt 0xf60 - Facility Unavailable Interrupt. 2417 * This is a synchronous interrupt in response to 2418 * executing an instruction without access to the facility that can be 2419 * resolved by the OS (e.g., FSCR, MSR). 2420 * Similar to FP unavailable. 
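 *
 * For example (illustrative): userspace touching the TAR while FSCR[TAR]
 * is clear lands here, and facility_unavailable_exception() decodes which
 * facility faulted from the FSCR before enabling it or sending SIGILL.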
2421 */
2422INT_DEFINE_BEGIN(facility_unavailable)
2423 IVEC=0xf60
2424#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2425 IKVM_REAL=1
2426#endif
2427INT_DEFINE_END(facility_unavailable)
2428
2429EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
2430 GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
2431EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
2432EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
2433 GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
2434EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
2435EXC_COMMON_BEGIN(facility_unavailable_common)
2436 GEN_COMMON facility_unavailable
2437 addi r3,r1,STACK_FRAME_OVERHEAD
2438 bl facility_unavailable_exception
2439 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2440 b interrupt_return
2441
2442 GEN_KVM facility_unavailable
2443
2444
2445/**
2446 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
2447 * This is a synchronous interrupt in response to
2448 * executing an instruction without access to the facility that can only
2449 * be resolved in HV mode (e.g., HFSCR).
2450 * Similar to FP unavailable.
2451 */
2452INT_DEFINE_BEGIN(h_facility_unavailable)
2453 IVEC=0xf80
2454 IHSRR=1
2455 IKVM_REAL=1
2456 IKVM_VIRT=1
2457INT_DEFINE_END(h_facility_unavailable)
2458
2459EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
2460 GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
2461EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
2462EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
2463 GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
2464EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
2465EXC_COMMON_BEGIN(h_facility_unavailable_common)
2466 GEN_COMMON h_facility_unavailable
2467 addi r3,r1,STACK_FRAME_OVERHEAD
2468 bl facility_unavailable_exception
2469 REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
2470 b interrupt_return
2471
2472 GEN_KVM h_facility_unavailable
2473
2474
2475EXC_REAL_NONE(0xfa0, 0x20)
2476EXC_VIRT_NONE(0x4fa0, 0x20)
2477EXC_REAL_NONE(0xfc0, 0x20)
2478EXC_VIRT_NONE(0x4fc0, 0x20)
2479EXC_REAL_NONE(0xfe0, 0x20)
2480EXC_VIRT_NONE(0x4fe0, 0x20)
2481
2482EXC_REAL_NONE(0x1000, 0x100)
2483EXC_VIRT_NONE(0x5000, 0x100)
2484EXC_REAL_NONE(0x1100, 0x100)
2485EXC_VIRT_NONE(0x5100, 0x100)
2486
2487#ifdef CONFIG_CBE_RAS
2488INT_DEFINE_BEGIN(cbe_system_error)
2489 IVEC=0x1200
2490 IHSRR=1
2491 IKVM_SKIP=1
2492 IKVM_REAL=1
2493INT_DEFINE_END(cbe_system_error)
2494
2495EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
2496 GEN_INT_ENTRY cbe_system_error, virt=0
2497EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
2498EXC_VIRT_NONE(0x5200, 0x100)
2499EXC_COMMON_BEGIN(cbe_system_error_common)
2500 GEN_COMMON cbe_system_error
2501 addi r3,r1,STACK_FRAME_OVERHEAD
2502 bl cbe_system_error_exception
2503 b interrupt_return
2504
2505 GEN_KVM cbe_system_error
2506
2507#else /* CONFIG_CBE_RAS */
2508EXC_REAL_NONE(0x1200, 0x100)
2509EXC_VIRT_NONE(0x5200, 0x100)
2510#endif
2511
2512
2513INT_DEFINE_BEGIN(instruction_breakpoint)
2514 IVEC=0x1300
2515#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2516 IKVM_SKIP=1
2517 IKVM_REAL=1
2518#endif
2519INT_DEFINE_END(instruction_breakpoint)
2520
2521EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
2522 GEN_INT_ENTRY instruction_breakpoint, virt=0
2523EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
2524EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
2525 GEN_INT_ENTRY instruction_breakpoint, virt=1
2526EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
2527EXC_COMMON_BEGIN(instruction_breakpoint_common)
2528 GEN_COMMON instruction_breakpoint
2529 addi r3,r1,STACK_FRAME_OVERHEAD
2530 bl instruction_breakpoint_exception
2531 b interrupt_return
2532
2533 GEN_KVM instruction_breakpoint
2534
2535
2536EXC_REAL_NONE(0x1400, 0x100)
2537EXC_VIRT_NONE(0x5400, 0x100)
2538
2539/**
2540 * Interrupt 0x1500 - Soft Patch Interrupt
2541 *
2542 * Handling:
2543 * This is an implementation specific interrupt which can be used for a
2544 * range of exceptions.
2545 *
2546 * This interrupt handler is unique in that it runs the denormal assist
2547 * code even for guests (and even in guest context) without going to KVM,
2548 * for speed. POWER9 does not raise denorm exceptions, so this special case
2549 * could be phased out in future to reduce special cases.
2550 */
2551INT_DEFINE_BEGIN(denorm_exception)
2552 IVEC=0x1500
2553 IHSRR=1
2554 IBRANCH_TO_COMMON=0
2555 IKVM_REAL=1
2556INT_DEFINE_END(denorm_exception)
2557
2558EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
2559 GEN_INT_ENTRY denorm_exception, virt=0
2560#ifdef CONFIG_PPC_DENORMALISATION
2561 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
2562 bne+ denorm_assist
2563#endif
2564 GEN_BRANCH_TO_COMMON denorm_exception, virt=0
2565EXC_REAL_END(denorm_exception, 0x1500, 0x100)
2566#ifdef CONFIG_PPC_DENORMALISATION
2567EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
2568 GEN_INT_ENTRY denorm_exception, virt=1
2569 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
2570 bne+ denorm_assist
2571 GEN_BRANCH_TO_COMMON denorm_exception, virt=1
2572EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
2573#else
2574EXC_VIRT_NONE(0x5500, 0x100)
2575#endif
2576
2577#ifdef CONFIG_PPC_DENORMALISATION
2578TRAMP_REAL_BEGIN(denorm_assist)
2579BEGIN_FTR_SECTION
2580/*
2581 * To denormalise we need to move a copy of the register to itself.
2582 * For POWER6 do that here for all FP regs.
2583 */
2584 mfmsr r10
2585 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
2586 xori r10,r10,(MSR_FE0|MSR_FE1)
2587 mtmsrd r10
2588 sync
2589
2590 .Lreg=0
2591 .rept 32
2592 fmr .Lreg,.Lreg
2593 .Lreg=.Lreg+1
2594 .endr
2595
2596FTR_SECTION_ELSE
2597/*
2598 * To denormalise we need to move a copy of the register to itself.
2599 * For POWER7 do that here for the first 32 VSX registers only.
2600 */
2601 mfmsr r10
2602 oris r10,r10,MSR_VSX@h
2603 mtmsrd r10
2604 sync
2605
2606 .Lreg=0
2607 .rept 32
2608 XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2609 .Lreg=.Lreg+1
2610 .endr
2611
2612ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
2613
2614BEGIN_FTR_SECTION
2615 b denorm_done
2616END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
2617/*
2618 * To denormalise we need to move a copy of the register to itself.
2619 * For POWER8 we need to do that for all 64 VSX registers.
2620 */
2621 .Lreg=32
2622 .rept 32
2623 XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2624 .Lreg=.Lreg+1
2625 .endr
2626
2627denorm_done:
2628 mfspr r11,SPRN_HSRR0
2629 subi r11,r11,4
2630 mtspr SPRN_HSRR0,r11
2631 mtcrf 0x80,r9
2632 ld r9,PACA_EXGEN+EX_R9(r13)
2633BEGIN_FTR_SECTION
2634 ld r10,PACA_EXGEN+EX_PPR(r13)
2635 mtspr SPRN_PPR,r10
2636END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
2637BEGIN_FTR_SECTION
2638 ld r10,PACA_EXGEN+EX_CFAR(r13)
2639 mtspr SPRN_CFAR,r10
2640END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
2641 ld r10,PACA_EXGEN+EX_R10(r13)
2642 ld r11,PACA_EXGEN+EX_R11(r13)
2643 ld r12,PACA_EXGEN+EX_R12(r13)
2644 ld r13,PACA_EXGEN+EX_R13(r13)
2645 HRFI_TO_UNKNOWN
2646 b .
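
/*
 * denorm_done winds HSRR0 back by one instruction before the hrfid, so
 * that once the source registers have been denormalised, execution
 * resumes at the instruction that took the soft patch interrupt (assumed
 * from the subi above).
 */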
2647#endif 2648 2649EXC_COMMON_BEGIN(denorm_exception_common) 2650 GEN_COMMON denorm_exception 2651 addi r3,r1,STACK_FRAME_OVERHEAD 2652 bl unknown_exception 2653 b interrupt_return 2654 2655 GEN_KVM denorm_exception 2656 2657 2658#ifdef CONFIG_CBE_RAS 2659INT_DEFINE_BEGIN(cbe_maintenance) 2660 IVEC=0x1600 2661 IHSRR=1 2662 IKVM_SKIP=1 2663 IKVM_REAL=1 2664INT_DEFINE_END(cbe_maintenance) 2665 2666EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) 2667 GEN_INT_ENTRY cbe_maintenance, virt=0 2668EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) 2669EXC_VIRT_NONE(0x5600, 0x100) 2670EXC_COMMON_BEGIN(cbe_maintenance_common) 2671 GEN_COMMON cbe_maintenance 2672 addi r3,r1,STACK_FRAME_OVERHEAD 2673 bl cbe_maintenance_exception 2674 b interrupt_return 2675 2676 GEN_KVM cbe_maintenance 2677 2678#else /* CONFIG_CBE_RAS */ 2679EXC_REAL_NONE(0x1600, 0x100) 2680EXC_VIRT_NONE(0x5600, 0x100) 2681#endif 2682 2683 2684INT_DEFINE_BEGIN(altivec_assist) 2685 IVEC=0x1700 2686#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2687 IKVM_REAL=1 2688#endif 2689INT_DEFINE_END(altivec_assist) 2690 2691EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100) 2692 GEN_INT_ENTRY altivec_assist, virt=0 2693EXC_REAL_END(altivec_assist, 0x1700, 0x100) 2694EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) 2695 GEN_INT_ENTRY altivec_assist, virt=1 2696EXC_VIRT_END(altivec_assist, 0x5700, 0x100) 2697EXC_COMMON_BEGIN(altivec_assist_common) 2698 GEN_COMMON altivec_assist 2699 addi r3,r1,STACK_FRAME_OVERHEAD 2700#ifdef CONFIG_ALTIVEC 2701 bl altivec_assist_exception 2702 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2703#else 2704 bl unknown_exception 2705#endif 2706 b interrupt_return 2707 2708 GEN_KVM altivec_assist 2709 2710 2711#ifdef CONFIG_CBE_RAS 2712INT_DEFINE_BEGIN(cbe_thermal) 2713 IVEC=0x1800 2714 IHSRR=1 2715 IKVM_SKIP=1 2716 IKVM_REAL=1 2717INT_DEFINE_END(cbe_thermal) 2718 2719EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) 2720 GEN_INT_ENTRY cbe_thermal, virt=0 2721EXC_REAL_END(cbe_thermal, 0x1800, 0x100) 2722EXC_VIRT_NONE(0x5800, 0x100) 2723EXC_COMMON_BEGIN(cbe_thermal_common) 2724 GEN_COMMON cbe_thermal 2725 addi r3,r1,STACK_FRAME_OVERHEAD 2726 bl cbe_thermal_exception 2727 b interrupt_return 2728 2729 GEN_KVM cbe_thermal 2730 2731#else /* CONFIG_CBE_RAS */ 2732EXC_REAL_NONE(0x1800, 0x100) 2733EXC_VIRT_NONE(0x5800, 0x100) 2734#endif 2735 2736 2737#ifdef CONFIG_PPC_WATCHDOG 2738 2739INT_DEFINE_BEGIN(soft_nmi) 2740 IVEC=0x900 2741 ISTACK=0 2742 IRECONCILE=0 /* Soft-NMI may fire under local_irq_disable */ 2743INT_DEFINE_END(soft_nmi) 2744 2745/* 2746 * Branch to soft_nmi_interrupt using the emergency stack. The emergency 2747 * stack is one that is usable by maskable interrupts so long as MSR_EE 2748 * remains off. It is used for recovery when something has corrupted the 2749 * normal kernel stack, for example. The "soft NMI" must not use the process 2750 * stack because we want irq disabled sections to avoid touching the stack 2751 * at all (other than PMU interrupts), so use the emergency stack for this, 2752 * and run it entirely with interrupts hard disabled. 
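 *
 * The decrementer masked handler (MASKED_INTERRUPT below) branches here
 * directly when CONFIG_PPC_WATCHDOG is enabled, which is how the watchdog
 * gets to run while interrupts are soft-disabled.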
2753 */
2754EXC_COMMON_BEGIN(soft_nmi_common)
2755 mfspr r11,SPRN_SRR0
2756 mr r10,r1
2757 ld r1,PACAEMERGSP(r13)
2758 subi r1,r1,INT_FRAME_SIZE
2759 __GEN_COMMON_BODY soft_nmi
2760
2761 /*
2762 * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
2763 * system_reset_common).
2764 */
2765 li r10,IRQS_ALL_DISABLED
2766 stb r10,PACAIRQSOFTMASK(r13)
2767 lbz r10,PACAIRQHAPPENED(r13)
2768 std r10,RESULT(r1)
2769 ori r10,r10,PACA_IRQ_HARD_DIS
2770 stb r10,PACAIRQHAPPENED(r13)
2771
2772 addi r3,r1,STACK_FRAME_OVERHEAD
2773 bl soft_nmi_interrupt
2774
2775 /* Clear MSR_RI before setting SRR0 and SRR1. */
2776 li r9,0
2777 mtmsrd r9,1
2778
2779 /*
2780 * Restore soft mask settings.
2781 */
2782 ld r10,RESULT(r1)
2783 stb r10,PACAIRQHAPPENED(r13)
2784 ld r10,SOFTE(r1)
2785 stb r10,PACAIRQSOFTMASK(r13)
2786
2787 kuap_restore_amr r9, r10
2788 EXCEPTION_RESTORE_REGS hsrr=0
2789 RFI_TO_KERNEL
2790
2791#endif /* CONFIG_PPC_WATCHDOG */
2792
2793/*
2794 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
2795 * - If it was a decrementer interrupt, we bump the dec to max and return.
2796 * - If it was a doorbell we return immediately since doorbells are edge
2797 * triggered and won't automatically refire.
2798 * - If it was an HMI we return immediately since we handled it in realmode
2799 * and it won't refire.
2800 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
2801 * This is called with r10 containing the value to OR to the paca field.
2802 */
2803.macro MASKED_INTERRUPT hsrr=0
2804 .if \hsrr
2805masked_Hinterrupt:
2806 .else
2807masked_interrupt:
2808 .endif
2809 lbz r11,PACAIRQHAPPENED(r13)
2810 or r11,r11,r10
2811 stb r11,PACAIRQHAPPENED(r13)
2812 cmpwi r10,PACA_IRQ_DEC
2813 bne 1f
2814 lis r10,0x7fff
2815 ori r10,r10,0xffff
2816 mtspr SPRN_DEC,r10
2817#ifdef CONFIG_PPC_WATCHDOG
2818 b soft_nmi_common
2819#else
2820 b 2f
2821#endif
28221: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
2823 beq 2f
2824 xori r12,r12,MSR_EE /* clear MSR_EE */
2825 .if \hsrr
2826 mtspr SPRN_HSRR1,r12
2827 .else
2828 mtspr SPRN_SRR1,r12
2829 .endif
2830 ori r11,r11,PACA_IRQ_HARD_DIS
2831 stb r11,PACAIRQHAPPENED(r13)
28322: /* done */
2833 ld r10,PACA_EXGEN+EX_CTR(r13)
2834 mtctr r10
2835 mtcrf 0x80,r9
2836 std r1,PACAR1(r13)
2837 ld r9,PACA_EXGEN+EX_R9(r13)
2838 ld r10,PACA_EXGEN+EX_R10(r13)
2839 ld r11,PACA_EXGEN+EX_R11(r13)
2840 ld r12,PACA_EXGEN+EX_R12(r13)
2841 /* returns to kernel where r13 must be set up, so don't restore it */
2842 .if \hsrr
2843 HRFI_TO_KERNEL
2844 .else
2845 RFI_TO_KERNEL
2846 .endif
2847 b .
2848.endm
2849
2850TRAMP_REAL_BEGIN(stf_barrier_fallback)
2851 std r9,PACA_EXRFI+EX_R9(r13)
2852 std r10,PACA_EXRFI+EX_R10(r13)
2853 sync
2854 ld r9,PACA_EXRFI+EX_R9(r13)
2855 ld r10,PACA_EXRFI+EX_R10(r13)
2856 ori 31,31,0
2857 .rept 14
2858 b 1f
28591:
2860 .endr
2861 blr
2862
2863TRAMP_REAL_BEGIN(rfi_flush_fallback)
2864 SET_SCRATCH0(r13);
2865 GET_PACA(r13);
2866 std r1,PACA_EXRFI+EX_R12(r13)
2867 ld r1,PACAKSAVE(r13)
2868 std r9,PACA_EXRFI+EX_R9(r13)
2869 std r10,PACA_EXRFI+EX_R10(r13)
2870 std r11,PACA_EXRFI+EX_R11(r13)
2871 mfctr r9
2872 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
2873 ld r11,PACA_L1D_FLUSH_SIZE(r13)
2874 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
2875 mtctr r11
2876 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
2877
2878 /* order ld/st prior to dcbt stop all streams with flushing */
2879 sync
2880
2881 /*
2882 * The load addresses are at staggered offsets within cachelines,
2883 * which suits some pipelines better (on others it should not
2884 * hurt).
2885 */
28861:
2887 ld r11,(0x80 + 8)*0(r10)
2888 ld r11,(0x80 + 8)*1(r10)
2889 ld r11,(0x80 + 8)*2(r10)
2890 ld r11,(0x80 + 8)*3(r10)
2891 ld r11,(0x80 + 8)*4(r10)
2892 ld r11,(0x80 + 8)*5(r10)
2893 ld r11,(0x80 + 8)*6(r10)
2894 ld r11,(0x80 + 8)*7(r10)
2895 addi r10,r10,0x80*8
2896 bdnz 1b
2897
2898 mtctr r9
2899 ld r9,PACA_EXRFI+EX_R9(r13)
2900 ld r10,PACA_EXRFI+EX_R10(r13)
2901 ld r11,PACA_EXRFI+EX_R11(r13)
2902 ld r1,PACA_EXRFI+EX_R12(r13)
2903 GET_SCRATCH0(r13);
2904 rfid
2905
2906TRAMP_REAL_BEGIN(hrfi_flush_fallback)
2907 SET_SCRATCH0(r13);
2908 GET_PACA(r13);
2909 std r1,PACA_EXRFI+EX_R12(r13)
2910 ld r1,PACAKSAVE(r13)
2911 std r9,PACA_EXRFI+EX_R9(r13)
2912 std r10,PACA_EXRFI+EX_R10(r13)
2913 std r11,PACA_EXRFI+EX_R11(r13)
2914 mfctr r9
2915 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
2916 ld r11,PACA_L1D_FLUSH_SIZE(r13)
2917 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
2918 mtctr r11
2919 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
2920
2921 /* order ld/st prior to dcbt stop all streams with flushing */
2922 sync
2923
2924 /*
2925 * The load addresses are at staggered offsets within cachelines,
2926 * which suits some pipelines better (on others it should not
2927 * hurt).
2928 */
29291:
2930 ld r11,(0x80 + 8)*0(r10)
2931 ld r11,(0x80 + 8)*1(r10)
2932 ld r11,(0x80 + 8)*2(r10)
2933 ld r11,(0x80 + 8)*3(r10)
2934 ld r11,(0x80 + 8)*4(r10)
2935 ld r11,(0x80 + 8)*5(r10)
2936 ld r11,(0x80 + 8)*6(r10)
2937 ld r11,(0x80 + 8)*7(r10)
2938 addi r10,r10,0x80*8
2939 bdnz 1b
2940
2941 mtctr r9
2942 ld r9,PACA_EXRFI+EX_R9(r13)
2943 ld r10,PACA_EXRFI+EX_R10(r13)
2944 ld r11,PACA_EXRFI+EX_R11(r13)
2945 ld r1,PACA_EXRFI+EX_R12(r13)
2946 GET_SCRATCH0(r13);
2947 hrfid
2948
2949USE_TEXT_SECTION()
2950 MASKED_INTERRUPT
2951 MASKED_INTERRUPT hsrr=1
2952
2953#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2954kvmppc_skip_interrupt:
2955 /*
2956 * Here all GPRs are unchanged from when the interrupt happened
2957 * except for r13, which is saved in SPRG_SCRATCH0.
2958 */
2959 mfspr r13, SPRN_SRR0
2960 addi r13, r13, 4
2961 mtspr SPRN_SRR0, r13
2962 GET_SCRATCH0(r13)
2963 RFI_TO_KERNEL
2964 b .
2965
2966kvmppc_skip_Hinterrupt:
2967 /*
2968 * Here all GPRs are unchanged from when the interrupt happened
2969 * except for r13, which is saved in SPRG_SCRATCH0.
2970 */
2971 mfspr r13, SPRN_HSRR0
2972 addi r13, r13, 4
2973 mtspr SPRN_HSRR0, r13
2974 GET_SCRATCH0(r13)
2975 HRFI_TO_KERNEL
2976 b .
2977#endif
2978
2979 /*
2980 * Relocation-on interrupts: A subset of the interrupts can be delivered
2981 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
2982 * it. Addresses are the same as the original interrupt addresses, but
2983 * offset by 0xc000000000004000.
2984 * It's impossible to receive interrupts below 0x300 via this mechanism.
2985 * KVM: None of these traps are from the guest; anything that escalated
2986 * to HV=1 from HV=0 is delivered via real mode handlers.
2987 */
2988
2989 /*
2990 * This uses the standard macro, since the original 0x300 vector
2991 * only has extra guff for STAB-based processors -- which never
2992 * come here.
2993 */
2994
2995EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
2996 b __ppc64_runlatch_on
2997
2998USE_FIXED_SECTION(virt_trampolines)
2999 /*
3000 * The __end_interrupts marker must be past the out-of-line (OOL)
3001 * handlers, so that they are copied to real address 0x100 when running
3002 * a relocatable kernel. This ensures they can be reached from the short
3003 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
3004 * directly, without using LOAD_HANDLER().
3005 */
3006 .align 7
3007 .globl __end_interrupts
3008__end_interrupts:
3009DEFINE_FIXED_SYMBOL(__end_interrupts)
3010
3011#ifdef CONFIG_PPC_970_NAP
3012 /*
3013 * Called by exception entry code if _TLF_NAPPING was set, this clears
3014 * the NAPPING flag, and redirects the exception exit to
3015 * power4_fixup_nap_return.
3016 */
3017 .globl power4_fixup_nap
3018EXC_COMMON_BEGIN(power4_fixup_nap)
3019 andc r9,r9,r10
3020 std r9,TI_LOCAL_FLAGS(r11)
3021 LOAD_REG_ADDR(r10, power4_idle_nap_return)
3022 std r10,_NIP(r1)
3023 blr
3024
3025power4_idle_nap_return:
3026 blr
3027#endif
3028
3029CLOSE_FIXED_SECTION(real_vectors);
3030CLOSE_FIXED_SECTION(real_trampolines);
3031CLOSE_FIXED_SECTION(virt_vectors);
3032CLOSE_FIXED_SECTION(virt_trampolines);
3033
3034USE_TEXT_SECTION()
3035
3036/* MSR[RI] should be clear because this uses SRR[01] */
3037enable_machine_check:
3038 mflr r0
3039 bcl 20,31,$+4
30400: mflr r3
3041 addi r3,r3,(1f - 0b)
3042 mtspr SPRN_SRR0,r3
3043 mfmsr r3
3044 ori r3,r3,MSR_ME
3045 mtspr SPRN_SRR1,r3
3046 RFI_TO_KERNEL
30471: mtlr r0
3048 blr
3049
3050/* MSR[RI] should be clear because this uses SRR[01] */
3051disable_machine_check:
3052 mflr r0
3053 bcl 20,31,$+4
30540: mflr r3
3055 addi r3,r3,(1f - 0b)
3056 mtspr SPRN_SRR0,r3
3057 mfmsr r3
3058 li r4,MSR_ME
3059 andc r3,r3,r4
3060 mtspr SPRN_SRR1,r3
3061 RFI_TO_KERNEL
30621: mtlr r0
3063 blr
3064
3065/*
3066 * Hash table stuff
3067 */
3068 .balign IFETCH_ALIGN_BYTES
3069do_hash_page:
3070#ifdef CONFIG_PPC_BOOK3S_64
3071 lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
3072 ori r0,r0,DSISR_BAD_FAULT_64S@l
3073 and. r0,r5,r0 /* weird error? */
3074 bne- handle_page_fault /* if so, don't try to insert an HPTE */
3075 ld r11, PACA_THREAD_INFO(r13)
3076 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
3077 andis. r0,r0,NMI_MASK@h /* (i.e.
an irq when soft-disabled) */ 3078 bne 77f /* then don't call hash_page now */ 3079 3080 /* 3081 * r3 contains the trap number 3082 * r4 contains the faulting address 3083 * r5 contains dsisr 3084 * r6 msr 3085 * 3086 * at return r3 = 0 for success, 1 for page fault, negative for error 3087 */ 3088 bl __hash_page /* build HPTE if possible */ 3089 cmpdi r3,0 /* see if __hash_page succeeded */ 3090 3091 /* Success */ 3092 beq interrupt_return /* Return from exception on success */ 3093 3094 /* Error */ 3095 blt- 13f 3096 3097 /* Reload DAR/DSISR into r4/r5 for the DABR check below */ 3098 ld r4,_DAR(r1) 3099 ld r5,_DSISR(r1) 3100#endif /* CONFIG_PPC_BOOK3S_64 */ 3101 3102/* Here we have a page fault that hash_page can't handle. */ 3103handle_page_fault: 310411: andis. r0,r5,DSISR_DABRMATCH@h 3105 bne- handle_dabr_fault 3106 addi r3,r1,STACK_FRAME_OVERHEAD 3107 bl do_page_fault 3108 cmpdi r3,0 3109 beq+ interrupt_return 3110 mr r5,r3 3111 addi r3,r1,STACK_FRAME_OVERHEAD 3112 ld r4,_DAR(r1) 3113 bl bad_page_fault 3114 b interrupt_return 3115 3116/* We have a data breakpoint exception - handle it */ 3117handle_dabr_fault: 3118 ld r4,_DAR(r1) 3119 ld r5,_DSISR(r1) 3120 addi r3,r1,STACK_FRAME_OVERHEAD 3121 bl do_break 3122 /* 3123 * do_break() may have changed the NV GPRS while handling a breakpoint. 3124 * If so, we need to restore them with their updated values. 3125 */ 3126 REST_NVGPRS(r1) 3127 b interrupt_return 3128 3129 3130#ifdef CONFIG_PPC_BOOK3S_64 3131/* We have a page fault that hash_page could handle but HV refused 3132 * the PTE insertion 3133 */ 313413: mr r5,r3 3135 addi r3,r1,STACK_FRAME_OVERHEAD 3136 ld r4,_DAR(r1) 3137 bl low_hash_fault 3138 b interrupt_return 3139#endif 3140 3141/* 3142 * We come here as a result of a DSI at a point where we don't want 3143 * to call hash_page, such as when we are accessing memory (possibly 3144 * user memory) inside a PMU interrupt that occurred while interrupts 3145 * were soft-disabled. We want to invoke the exception handler for 3146 * the access, or panic if there isn't a handler. 3147 */ 314877: addi r3,r1,STACK_FRAME_OVERHEAD 3149 li r5,SIGSEGV 3150 bl bad_page_fault 3151 b interrupt_return 3152
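
/*
 * The hash fault flow above, condensed: do_hash_page calls __hash_page for
 * plain hash faults; DABR matches divert to do_break(); faults that
 * __hash_page cannot fix fall through to do_page_fault(); hypervisor-refused
 * insertions go to low_hash_fault(); and unrecoverable cases end in
 * bad_page_fault().
 */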