1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * This file contains the 64-bit "server" PowerPC variant 4 * of the low level exception handling including exception 5 * vectors, exception return, part of the slb and stab 6 * handling and other fixed offset specific things. 7 * 8 * This file is meant to be #included from head_64.S due to 9 * position dependent assembly. 10 * 11 * Most of this originates from head_64.S and thus has the same 12 * copyright history. 13 * 14 */ 15 16#include <asm/hw_irq.h> 17#include <asm/exception-64s.h> 18#include <asm/ptrace.h> 19#include <asm/cpuidle.h> 20#include <asm/head-64.h> 21#include <asm/feature-fixups.h> 22#include <asm/kup.h> 23 24/* 25 * Following are fixed section helper macros. 26 * 27 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors 28 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors 29 * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these) 30 * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use) 31 * EXC_COMMON - After switching to virtual, relocated mode. 32 */ 33 34#define EXC_REAL_BEGIN(name, start, size) \ 35 FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size) 36 37#define EXC_REAL_END(name, start, size) \ 38 FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size) 39 40#define EXC_VIRT_BEGIN(name, start, size) \ 41 FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) 42 43#define EXC_VIRT_END(name, start, size) \ 44 FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) 45 46#define EXC_COMMON_BEGIN(name) \ 47 USE_TEXT_SECTION(); \ 48 .balign IFETCH_ALIGN_BYTES; \ 49 .global name; \ 50 _ASM_NOKPROBE_SYMBOL(name); \ 51 DEFINE_FIXED_SYMBOL(name); \ 52name: 53 54#define TRAMP_REAL_BEGIN(name) \ 55 FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name) 56 57#define TRAMP_VIRT_BEGIN(name) \ 58 FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name) 59 60#define EXC_REAL_NONE(start, size) \ 61 FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \ 62 FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size) 63 64#define EXC_VIRT_NONE(start, size) \ 65 FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \ 66 FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size) 67 68/* 69 * We're short on space and time in the exception prolog, so we can't 70 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label. 71 * Instead we get the base of the kernel from paca->kernelbase and or in the low 72 * part of label. This requires that the label be within 64KB of kernelbase, and 73 * that kernelbase be 64K aligned. 74 */ 75#define LOAD_HANDLER(reg, label) \ 76 ld reg,PACAKBASE(r13); /* get high part of &label */ \ 77 ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label) 78 79#define __LOAD_HANDLER(reg, label) \ 80 ld reg,PACAKBASE(r13); \ 81 ori reg,reg,(ABS_ADDR(label))@l 82 83/* 84 * Branches from unrelocated code (e.g., interrupts) to labels outside 85 * head-y require >64K offsets. 86 */ 87#define __LOAD_FAR_HANDLER(reg, label) \ 88 ld reg,PACAKBASE(r13); \ 89 ori reg,reg,(ABS_ADDR(label))@l; \ 90 addis reg,reg,(ABS_ADDR(label))@h 91 92/* 93 * Branch to label using its 0xC000 address. This results in instruction 94 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned 95 * on using mtmsr rather than rfid. 96 * 97 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than 98 * load KBASE for a slight optimisation. 99 */ 100#define BRANCH_TO_C000(reg, label) \ 101 __LOAD_FAR_HANDLER(reg, label); \ 102 mtctr reg; \ 103 bctr 104 105/* 106 * Interrupt code generation macros 107 */ 108#define IVEC .L_IVEC_\name\() /* Interrupt vector address */ 109#define IHSRR .L_IHSRR_\name\() /* Sets SRR or HSRR registers */ 110#define IHSRR_IF_HVMODE .L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */ 111#define IAREA .L_IAREA_\name\() /* PACA save area */ 112#define IVIRT .L_IVIRT_\name\() /* Has virt mode entry point */ 113#define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */ 114#define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */ 115#define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */ 116#define ISET_RI .L_ISET_RI_\name\() /* Run common code w/ MSR[RI]=1 */ 117#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */ 118#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */ 119#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */ 120#define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */ 121#define __IKVM_REAL(name) .L_IKVM_REAL_ ## name 122#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */ 123#define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */ 124#define __ISTACK(name) .L_ISTACK_ ## name 125#define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */ 126 127#define INT_DEFINE_BEGIN(n) \ 128.macro int_define_ ## n name 129 130#define INT_DEFINE_END(n) \ 131.endm ; \ 132int_define_ ## n n ; \ 133do_define_int n 134 135.macro do_define_int name 136 .ifndef IVEC 137 .error "IVEC not defined" 138 .endif 139 .ifndef IHSRR 140 IHSRR=0 141 .endif 142 .ifndef IHSRR_IF_HVMODE 143 IHSRR_IF_HVMODE=0 144 .endif 145 .ifndef IAREA 146 IAREA=PACA_EXGEN 147 .endif 148 .ifndef IVIRT 149 IVIRT=1 150 .endif 151 .ifndef IISIDE 152 IISIDE=0 153 .endif 154 .ifndef IDAR 155 IDAR=0 156 .endif 157 .ifndef IDSISR 158 IDSISR=0 159 .endif 160 .ifndef ISET_RI 161 ISET_RI=1 162 .endif 163 .ifndef IBRANCH_TO_COMMON 164 IBRANCH_TO_COMMON=1 165 .endif 166 .ifndef IREALMODE_COMMON 167 IREALMODE_COMMON=0 168 .else 169 .if ! IBRANCH_TO_COMMON 170 .error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0" 171 .endif 172 .endif 173 .ifndef IMASK 174 IMASK=0 175 .endif 176 .ifndef IKVM_REAL 177 IKVM_REAL=0 178 .endif 179 .ifndef IKVM_VIRT 180 IKVM_VIRT=0 181 .endif 182 .ifndef ISTACK 183 ISTACK=1 184 .endif 185 .ifndef IKUAP 186 IKUAP=1 187 .endif 188.endm 189 190/* 191 * All interrupts which set HSRR registers, as well as SRESET and MCE and 192 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken, 193 * so they all generally need to test whether they were taken in guest context. 194 * 195 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be 196 * taken with MSR[HV]=0. 197 * 198 * Interrupts which set SRR registers (with the above exceptions) do not 199 * elevate to MSR[HV]=1 mode, though most can be taken when running with 200 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do 201 * not need to test whether a guest is running because they get delivered to 202 * the guest directly, including nested HV KVM guests. 203 * 204 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host 205 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the 206 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be 207 * delivered to the real-mode entry point, therefore such interrupts only test 208 * KVM in their real mode handlers, and only when PR KVM is possible. 209 * 210 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always 211 * delivered in real-mode when the MMU is in hash mode because the MMU 212 * registers are not set appropriately to translate host addresses. In nested 213 * radix mode these can be delivered in virt-mode as the host translations are 214 * used implicitly (see: effective LPID, effective PID). 215 */ 216 217/* 218 * If an interrupt is taken while a guest is running, it is immediately routed 219 * to KVM to handle. 220 */ 221 222.macro KVMTEST name handler 223#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 224 lbz r10,HSTATE_IN_GUEST(r13) 225 cmpwi r10,0 226 /* HSRR variants have the 0x2 bit added to their trap number */ 227 .if IHSRR_IF_HVMODE 228 BEGIN_FTR_SECTION 229 li r10,(IVEC + 0x2) 230 FTR_SECTION_ELSE 231 li r10,(IVEC) 232 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 233 .elseif IHSRR 234 li r10,(IVEC + 0x2) 235 .else 236 li r10,(IVEC) 237 .endif 238 bne \handler 239#endif 240.endm 241 242/* 243 * This is the BOOK3S interrupt entry code macro. 244 * 245 * This can result in one of several things happening: 246 * - Branch to the _common handler, relocated, in virtual mode. 247 * These are normal interrupts (synchronous and asynchronous) handled by 248 * the kernel. 249 * - Branch to KVM, relocated but real mode interrupts remain in real mode. 250 * These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by 251 * / intended for host or guest kernel, but KVM must always be involved 252 * because the machine state is set for guest execution. 253 * - Branch to the masked handler, unrelocated. 254 * These occur when maskable asynchronous interrupts are taken with the 255 * irq_soft_mask set. 256 * - Branch to an "early" handler in real mode but relocated. 257 * This is done if early=1. MCE and HMI use these to handle errors in real 258 * mode. 259 * - Fall through and continue executing in real, unrelocated mode. 260 * This is done if early=2. 261 */ 262 263.macro GEN_BRANCH_TO_COMMON name, virt 264 .if IREALMODE_COMMON 265 LOAD_HANDLER(r10, \name\()_common) 266 mtctr r10 267 bctr 268 .else 269 .if \virt 270#ifndef CONFIG_RELOCATABLE 271 b \name\()_common_virt 272#else 273 LOAD_HANDLER(r10, \name\()_common_virt) 274 mtctr r10 275 bctr 276#endif 277 .else 278 LOAD_HANDLER(r10, \name\()_common_real) 279 mtctr r10 280 bctr 281 .endif 282 .endif 283.endm 284 285.macro GEN_INT_ENTRY name, virt, ool=0 286 SET_SCRATCH0(r13) /* save r13 */ 287 GET_PACA(r13) 288 std r9,IAREA+EX_R9(r13) /* save r9 */ 289BEGIN_FTR_SECTION 290 mfspr r9,SPRN_PPR 291END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 292 HMT_MEDIUM 293 std r10,IAREA+EX_R10(r13) /* save r10 - r12 */ 294BEGIN_FTR_SECTION 295 mfspr r10,SPRN_CFAR 296END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 297 .if \ool 298 .if !\virt 299 b tramp_real_\name 300 .pushsection .text 301 TRAMP_REAL_BEGIN(tramp_real_\name) 302 .else 303 b tramp_virt_\name 304 .pushsection .text 305 TRAMP_VIRT_BEGIN(tramp_virt_\name) 306 .endif 307 .endif 308 309BEGIN_FTR_SECTION 310 std r9,IAREA+EX_PPR(r13) 311END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 312BEGIN_FTR_SECTION 313 std r10,IAREA+EX_CFAR(r13) 314END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 315 INTERRUPT_TO_KERNEL 316 mfctr r10 317 std r10,IAREA+EX_CTR(r13) 318 mfcr r9 319 std r11,IAREA+EX_R11(r13) 320 std r12,IAREA+EX_R12(r13) 321 322 /* 323 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI], 324 * because a d-side MCE will clobber those registers so is 325 * not recoverable if they are live. 326 */ 327 GET_SCRATCH0(r10) 328 std r10,IAREA+EX_R13(r13) 329 .if IDAR && !IISIDE 330 .if IHSRR 331 mfspr r10,SPRN_HDAR 332 .else 333 mfspr r10,SPRN_DAR 334 .endif 335 std r10,IAREA+EX_DAR(r13) 336 .endif 337 .if IDSISR && !IISIDE 338 .if IHSRR 339 mfspr r10,SPRN_HDSISR 340 .else 341 mfspr r10,SPRN_DSISR 342 .endif 343 stw r10,IAREA+EX_DSISR(r13) 344 .endif 345 346 .if IHSRR_IF_HVMODE 347 BEGIN_FTR_SECTION 348 mfspr r11,SPRN_HSRR0 /* save HSRR0 */ 349 mfspr r12,SPRN_HSRR1 /* and HSRR1 */ 350 FTR_SECTION_ELSE 351 mfspr r11,SPRN_SRR0 /* save SRR0 */ 352 mfspr r12,SPRN_SRR1 /* and SRR1 */ 353 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 354 .elseif IHSRR 355 mfspr r11,SPRN_HSRR0 /* save HSRR0 */ 356 mfspr r12,SPRN_HSRR1 /* and HSRR1 */ 357 .else 358 mfspr r11,SPRN_SRR0 /* save SRR0 */ 359 mfspr r12,SPRN_SRR1 /* and SRR1 */ 360 .endif 361 362 .if IBRANCH_TO_COMMON 363 GEN_BRANCH_TO_COMMON \name \virt 364 .endif 365 366 .if \ool 367 .popsection 368 .endif 369.endm 370 371/* 372 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt 373 * entry, except in the case of the real-mode handlers which require 374 * __GEN_REALMODE_COMMON_ENTRY. 375 * 376 * This switches to virtual mode and sets MSR[RI]. 377 */ 378.macro __GEN_COMMON_ENTRY name 379DEFINE_FIXED_SYMBOL(\name\()_common_real) 380\name\()_common_real: 381 .if IKVM_REAL 382 KVMTEST \name kvm_interrupt 383 .endif 384 385 ld r10,PACAKMSR(r13) /* get MSR value for kernel */ 386 /* MSR[RI] is clear iff using SRR regs */ 387 .if IHSRR_IF_HVMODE 388 BEGIN_FTR_SECTION 389 xori r10,r10,MSR_RI 390 END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE) 391 .elseif ! IHSRR 392 xori r10,r10,MSR_RI 393 .endif 394 mtmsrd r10 395 396 .if IVIRT 397 .if IKVM_VIRT 398 b 1f /* skip the virt test coming from real */ 399 .endif 400 401 .balign IFETCH_ALIGN_BYTES 402DEFINE_FIXED_SYMBOL(\name\()_common_virt) 403\name\()_common_virt: 404 .if IKVM_VIRT 405 KVMTEST \name kvm_interrupt 4061: 407 .endif 408 .endif /* IVIRT */ 409.endm 410 411/* 412 * Don't switch to virt mode. Used for early MCE and HMI handlers that 413 * want to run in real mode. 414 */ 415.macro __GEN_REALMODE_COMMON_ENTRY name 416DEFINE_FIXED_SYMBOL(\name\()_common_real) 417\name\()_common_real: 418 .if IKVM_REAL 419 KVMTEST \name kvm_interrupt 420 .endif 421.endm 422 423.macro __GEN_COMMON_BODY name 424 .if IMASK 425 .if ! ISTACK 426 .error "No support for masked interrupt to use custom stack" 427 .endif 428 429 /* If coming from user, skip soft-mask tests. */ 430 andi. r10,r12,MSR_PR 431 bne 3f 432 433 /* 434 * Kernel code running below __end_soft_masked may be 435 * implicitly soft-masked if it is within the regions 436 * in the soft mask table. 437 */ 438 LOAD_HANDLER(r10, __end_soft_masked) 439 cmpld r11,r10 440 bge+ 1f 441 442 /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */ 443 mtctr r12 444 stw r9,PACA_EXGEN+EX_CCR(r13) 445 SEARCH_SOFT_MASK_TABLE 446 cmpdi r12,0 447 mfctr r12 /* Restore r12 to SRR1 */ 448 lwz r9,PACA_EXGEN+EX_CCR(r13) 449 beq 1f /* Not in soft-mask table */ 450 li r10,IMASK 451 b 2f /* In soft-mask table, always mask */ 452 453 /* Test the soft mask state against our interrupt's bit */ 4541: lbz r10,PACAIRQSOFTMASK(r13) 4552: andi. r10,r10,IMASK 456 /* Associate vector numbers with bits in paca->irq_happened */ 457 .if IVEC == 0x500 || IVEC == 0xea0 458 li r10,PACA_IRQ_EE 459 .elseif IVEC == 0x900 460 li r10,PACA_IRQ_DEC 461 .elseif IVEC == 0xa00 || IVEC == 0xe80 462 li r10,PACA_IRQ_DBELL 463 .elseif IVEC == 0xe60 464 li r10,PACA_IRQ_HMI 465 .elseif IVEC == 0xf00 466 li r10,PACA_IRQ_PMI 467 .else 468 .abort "Bad maskable vector" 469 .endif 470 471 .if IHSRR_IF_HVMODE 472 BEGIN_FTR_SECTION 473 bne masked_Hinterrupt 474 FTR_SECTION_ELSE 475 bne masked_interrupt 476 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 477 .elseif IHSRR 478 bne masked_Hinterrupt 479 .else 480 bne masked_interrupt 481 .endif 482 .endif 483 484 .if ISTACK 485 andi. r10,r12,MSR_PR /* See if coming from user */ 4863: mr r10,r1 /* Save r1 */ 487 subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */ 488 beq- 100f 489 ld r1,PACAKSAVE(r13) /* kernel stack to use */ 490100: tdgei r1,-INT_FRAME_SIZE /* trap if r1 is in userspace */ 491 EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0 492 .endif 493 494 std r9,_CCR(r1) /* save CR in stackframe */ 495 std r11,_NIP(r1) /* save SRR0 in stackframe */ 496 std r12,_MSR(r1) /* save SRR1 in stackframe */ 497 std r10,0(r1) /* make stack chain pointer */ 498 std r0,GPR0(r1) /* save r0 in stackframe */ 499 std r10,GPR1(r1) /* save r1 in stackframe */ 500 501 /* Mark our [H]SRRs valid for return */ 502 li r10,1 503 .if IHSRR_IF_HVMODE 504 BEGIN_FTR_SECTION 505 stb r10,PACAHSRR_VALID(r13) 506 FTR_SECTION_ELSE 507 stb r10,PACASRR_VALID(r13) 508 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 509 .elseif IHSRR 510 stb r10,PACAHSRR_VALID(r13) 511 .else 512 stb r10,PACASRR_VALID(r13) 513 .endif 514 515 .if ISET_RI 516 li r10,MSR_RI 517 mtmsrd r10,1 /* Set MSR_RI */ 518 .endif 519 520 .if ISTACK 521 .if IKUAP 522 kuap_save_amr_and_lock r9, r10, cr1, cr0 523 .endif 524 beq 101f /* if from kernel mode */ 525BEGIN_FTR_SECTION 526 ld r9,IAREA+EX_PPR(r13) /* Read PPR from paca */ 527 std r9,_PPR(r1) 528END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 529101: 530 .else 531 .if IKUAP 532 kuap_save_amr_and_lock r9, r10, cr1 533 .endif 534 .endif 535 536 /* Save original regs values from save area to stack frame. */ 537 ld r9,IAREA+EX_R9(r13) /* move r9, r10 to stackframe */ 538 ld r10,IAREA+EX_R10(r13) 539 std r9,GPR9(r1) 540 std r10,GPR10(r1) 541 ld r9,IAREA+EX_R11(r13) /* move r11 - r13 to stackframe */ 542 ld r10,IAREA+EX_R12(r13) 543 ld r11,IAREA+EX_R13(r13) 544 std r9,GPR11(r1) 545 std r10,GPR12(r1) 546 std r11,GPR13(r1) 547 548 SAVE_NVGPRS(r1) 549 550 .if IDAR 551 .if IISIDE 552 ld r10,_NIP(r1) 553 .else 554 ld r10,IAREA+EX_DAR(r13) 555 .endif 556 std r10,_DAR(r1) 557 .endif 558 559 .if IDSISR 560 .if IISIDE 561 ld r10,_MSR(r1) 562 lis r11,DSISR_SRR1_MATCH_64S@h 563 and r10,r10,r11 564 .else 565 lwz r10,IAREA+EX_DSISR(r13) 566 .endif 567 std r10,_DSISR(r1) 568 .endif 569 570BEGIN_FTR_SECTION 571 ld r10,IAREA+EX_CFAR(r13) 572 std r10,ORIG_GPR3(r1) 573END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 574 ld r10,IAREA+EX_CTR(r13) 575 std r10,_CTR(r1) 576 std r2,GPR2(r1) /* save r2 in stackframe */ 577 SAVE_4GPRS(3, r1) /* save r3 - r6 in stackframe */ 578 SAVE_2GPRS(7, r1) /* save r7, r8 in stackframe */ 579 mflr r9 /* Get LR, later save to stack */ 580 ld r2,PACATOC(r13) /* get kernel TOC into r2 */ 581 std r9,_LINK(r1) 582 lbz r10,PACAIRQSOFTMASK(r13) 583 mfspr r11,SPRN_XER /* save XER in stackframe */ 584 std r10,SOFTE(r1) 585 std r11,_XER(r1) 586 li r9,IVEC 587 std r9,_TRAP(r1) /* set trap number */ 588 li r10,0 589 ld r11,exception_marker@toc(r2) 590 std r10,RESULT(r1) /* clear regs->result */ 591 std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ 592.endm 593 594/* 595 * On entry r13 points to the paca, r9-r13 are saved in the paca, 596 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and 597 * SRR1, and relocation is on. 598 * 599 * If stack=0, then the stack is already set in r1, and r1 is saved in r10. 600 * PPR save and CPU accounting is not done for the !stack case (XXX why not?) 601 */ 602.macro GEN_COMMON name 603 __GEN_COMMON_ENTRY \name 604 __GEN_COMMON_BODY \name 605.endm 606 607.macro SEARCH_RESTART_TABLE 608#ifdef CONFIG_RELOCATABLE 609 mr r12,r2 610 ld r2,PACATOC(r13) 611 LOAD_REG_ADDR(r9, __start___restart_table) 612 LOAD_REG_ADDR(r10, __stop___restart_table) 613 mr r2,r12 614#else 615 LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table) 616 LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table) 617#endif 618300: 619 cmpd r9,r10 620 beq 302f 621 ld r12,0(r9) 622 cmpld r11,r12 623 blt 301f 624 ld r12,8(r9) 625 cmpld r11,r12 626 bge 301f 627 ld r12,16(r9) 628 b 303f 629301: 630 addi r9,r9,24 631 b 300b 632302: 633 li r12,0 634303: 635.endm 636 637.macro SEARCH_SOFT_MASK_TABLE 638#ifdef CONFIG_RELOCATABLE 639 mr r12,r2 640 ld r2,PACATOC(r13) 641 LOAD_REG_ADDR(r9, __start___soft_mask_table) 642 LOAD_REG_ADDR(r10, __stop___soft_mask_table) 643 mr r2,r12 644#else 645 LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table) 646 LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table) 647#endif 648300: 649 cmpd r9,r10 650 beq 302f 651 ld r12,0(r9) 652 cmpld r11,r12 653 blt 301f 654 ld r12,8(r9) 655 cmpld r11,r12 656 bge 301f 657 li r12,1 658 b 303f 659301: 660 addi r9,r9,16 661 b 300b 662302: 663 li r12,0 664303: 665.endm 666 667/* 668 * Restore all registers including H/SRR0/1 saved in a stack frame of a 669 * standard exception. 670 */ 671.macro EXCEPTION_RESTORE_REGS hsrr=0 672 /* Move original SRR0 and SRR1 into the respective regs */ 673 ld r9,_MSR(r1) 674 li r10,0 675 .if \hsrr 676 mtspr SPRN_HSRR1,r9 677 stb r10,PACAHSRR_VALID(r13) 678 .else 679 mtspr SPRN_SRR1,r9 680 stb r10,PACASRR_VALID(r13) 681 .endif 682 ld r9,_NIP(r1) 683 .if \hsrr 684 mtspr SPRN_HSRR0,r9 685 .else 686 mtspr SPRN_SRR0,r9 687 .endif 688 ld r9,_CTR(r1) 689 mtctr r9 690 ld r9,_XER(r1) 691 mtxer r9 692 ld r9,_LINK(r1) 693 mtlr r9 694 ld r9,_CCR(r1) 695 mtcr r9 696 REST_8GPRS(2, r1) 697 REST_4GPRS(10, r1) 698 REST_GPR(0, r1) 699 /* restore original r1. */ 700 ld r1,GPR1(r1) 701.endm 702 703/* 704 * There are a few constraints to be concerned with. 705 * - Real mode exceptions code/data must be located at their physical location. 706 * - Virtual mode exceptions must be mapped at their 0xc000... location. 707 * - Fixed location code must not call directly beyond the __end_interrupts 708 * area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence 709 * must be used. 710 * - LOAD_HANDLER targets must be within first 64K of physical 0 / 711 * virtual 0xc00... 712 * - Conditional branch targets must be within +/-32K of caller. 713 * 714 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and 715 * therefore don't have to run in physically located code or rfid to 716 * virtual mode kernel code. However on relocatable kernels they do have 717 * to branch to KERNELBASE offset because the rest of the kernel (outside 718 * the exception vectors) may be located elsewhere. 719 * 720 * Virtual exceptions correspond with physical, except their entry points 721 * are offset by 0xc000000000000000 and also tend to get an added 0x4000 722 * offset applied. Virtual exceptions are enabled with the Alternate 723 * Interrupt Location (AIL) bit set in the LPCR. However this does not 724 * guarantee they will be delivered virtually. Some conditions (see the ISA) 725 * cause exceptions to be delivered in real mode. 726 * 727 * The scv instructions are a special case. They get a 0x3000 offset applied. 728 * scv exceptions have unique reentrancy properties, see below. 729 * 730 * It's impossible to receive interrupts below 0x300 via AIL. 731 * 732 * KVM: None of the virtual exceptions are from the guest. Anything that 733 * escalated to HV=1 from HV=0 is delivered via real mode handlers. 734 * 735 * 736 * We layout physical memory as follows: 737 * 0x0000 - 0x00ff : Secondary processor spin code 738 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors 739 * 0x1900 - 0x2fff : Real mode trampolines 740 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors 741 * 0x5900 - 0x6fff : Relon mode trampolines 742 * 0x7000 - 0x7fff : FWNMI data area 743 * 0x8000 - .... : Common interrupt handlers, remaining early 744 * setup code, rest of kernel. 745 * 746 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space 747 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE 748 * vectors there. 749 */ 750OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900) 751OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x3000) 752OPEN_FIXED_SECTION(virt_vectors, 0x3000, 0x5900) 753OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000) 754 755#ifdef CONFIG_PPC_POWERNV 756 .globl start_real_trampolines 757 .globl end_real_trampolines 758 .globl start_virt_trampolines 759 .globl end_virt_trampolines 760#endif 761 762#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 763/* 764 * Data area reserved for FWNMI option. 765 * This address (0x7000) is fixed by the RPA. 766 * pseries and powernv need to keep the whole page from 767 * 0x7000 to 0x8000 free for use by the firmware 768 */ 769ZERO_FIXED_SECTION(fwnmi_page, 0x7000, 0x8000) 770OPEN_TEXT_SECTION(0x8000) 771#else 772OPEN_TEXT_SECTION(0x7000) 773#endif 774 775USE_FIXED_SECTION(real_vectors) 776 777/* 778 * This is the start of the interrupt handlers for pSeries 779 * This code runs with relocation off. 780 * Code from here to __end_interrupts gets copied down to real 781 * address 0x100 when we are running a relocatable kernel. 782 * Therefore any relative branches in this section must only 783 * branch to labels in this section. 784 */ 785 .globl __start_interrupts 786__start_interrupts: 787 788/** 789 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall). 790 * This is a synchronous interrupt invoked with the "scv" instruction. The 791 * system call does not alter the HV bit, so it is directed to the OS. 792 * 793 * Handling: 794 * scv instructions enter the kernel without changing EE, RI, ME, or HV. 795 * In particular, this means we can take a maskable interrupt at any point 796 * in the scv handler, which is unlike any other interrupt. This is solved 797 * by treating the instruction addresses in the handler as being soft-masked, 798 * by adding a SOFT_MASK_TABLE entry for them. 799 * 800 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and 801 * ensure scv is never executed with relocation off, which means AIL-0 802 * should never happen. 803 * 804 * Before leaving the following inside-__end_soft_masked text, at least of the 805 * following must be true: 806 * - MSR[PR]=1 (i.e., return to userspace) 807 * - MSR_EE|MSR_RI is clear (no reentrant exceptions) 808 * - Standard kernel environment is set up (stack, paca, etc) 809 * 810 * Call convention: 811 * 812 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst 813 */ 814EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000) 8151: 816 /* SCV 0 */ 817 mr r9,r13 818 GET_PACA(r13) 819 mflr r11 820 mfctr r12 821 li r10,IRQS_ALL_DISABLED 822 stb r10,PACAIRQSOFTMASK(r13) 823#ifdef CONFIG_RELOCATABLE 824 b system_call_vectored_tramp 825#else 826 b system_call_vectored_common 827#endif 828 nop 829 830 /* SCV 1 - 127 */ 831 .rept 127 832 mr r9,r13 833 GET_PACA(r13) 834 mflr r11 835 mfctr r12 836 li r10,IRQS_ALL_DISABLED 837 stb r10,PACAIRQSOFTMASK(r13) 838 li r0,-1 /* cause failure */ 839#ifdef CONFIG_RELOCATABLE 840 b system_call_vectored_sigill_tramp 841#else 842 b system_call_vectored_sigill 843#endif 844 .endr 8452: 846EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000) 847 848SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above. 849 850#ifdef CONFIG_RELOCATABLE 851TRAMP_VIRT_BEGIN(system_call_vectored_tramp) 852 __LOAD_HANDLER(r10, system_call_vectored_common) 853 mtctr r10 854 bctr 855 856TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp) 857 __LOAD_HANDLER(r10, system_call_vectored_sigill) 858 mtctr r10 859 bctr 860#endif 861 862 863/* No virt vectors corresponding with 0x0..0x100 */ 864EXC_VIRT_NONE(0x4000, 0x100) 865 866 867/** 868 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI). 869 * This is a non-maskable, asynchronous interrupt always taken in real-mode. 870 * It is caused by: 871 * - Wake from power-saving state, on powernv. 872 * - An NMI from another CPU, triggered by firmware or hypercall. 873 * - As crash/debug signal injected from BMC, firmware or hypervisor. 874 * 875 * Handling: 876 * Power-save wakeup is the only performance critical path, so this is 877 * determined quickly as possible first. In this case volatile registers 878 * can be discarded and SPRs like CFAR don't need to be read. 879 * 880 * If not a powersave wakeup, then it's run as a regular interrupt, however 881 * it uses its own stack and PACA save area to preserve the regular kernel 882 * environment for debugging. 883 * 884 * This interrupt is not maskable, so triggering it when MSR[RI] is clear, 885 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely 886 * correct to switch to virtual mode to run the regular interrupt handler 887 * because it might be interrupted when the MMU is in a bad state (e.g., SLB 888 * is clear). 889 * 890 * FWNMI: 891 * PAPR specifies a "fwnmi" facility which sends the sreset to a different 892 * entry point with a different register set up. Some hypervisors will 893 * send the sreset to 0x100 in the guest if it is not fwnmi capable. 894 * 895 * KVM: 896 * Unlike most SRR interrupts, this may be taken by the host while executing 897 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest 898 * mode and then raise the sreset. 899 */ 900INT_DEFINE_BEGIN(system_reset) 901 IVEC=0x100 902 IAREA=PACA_EXNMI 903 IVIRT=0 /* no virt entry point */ 904 /* 905 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is 906 * being used, so a nested NMI exception would corrupt it. 907 */ 908 ISET_RI=0 909 ISTACK=0 910 IKVM_REAL=1 911INT_DEFINE_END(system_reset) 912 913EXC_REAL_BEGIN(system_reset, 0x100, 0x100) 914#ifdef CONFIG_PPC_P7_NAP 915 /* 916 * If running native on arch 2.06 or later, check if we are waking up 917 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1 918 * bits 46:47. A non-0 value indicates that we are coming from a power 919 * saving state. The idle wakeup handler initially runs in real mode, 920 * but we branch to the 0xc000... address so we can turn on relocation 921 * with mtmsrd later, after SPRs are restored. 922 * 923 * Careful to minimise cost for the fast path (idle wakeup) while 924 * also avoiding clobbering CFAR for the debug path (non-idle). 925 * 926 * For the idle wake case volatile registers can be clobbered, which 927 * is why we use those initially. If it turns out to not be an idle 928 * wake, carefully put everything back the way it was, so we can use 929 * common exception macros to handle it. 930 */ 931BEGIN_FTR_SECTION 932 SET_SCRATCH0(r13) 933 GET_PACA(r13) 934 std r3,PACA_EXNMI+0*8(r13) 935 std r4,PACA_EXNMI+1*8(r13) 936 std r5,PACA_EXNMI+2*8(r13) 937 mfspr r3,SPRN_SRR1 938 mfocrf r4,0x80 939 rlwinm. r5,r3,47-31,30,31 940 bne+ system_reset_idle_wake 941 /* Not powersave wakeup. Restore regs for regular interrupt handler. */ 942 mtocrf 0x80,r4 943 ld r3,PACA_EXNMI+0*8(r13) 944 ld r4,PACA_EXNMI+1*8(r13) 945 ld r5,PACA_EXNMI+2*8(r13) 946 GET_SCRATCH0(r13) 947END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 948#endif 949 950 GEN_INT_ENTRY system_reset, virt=0 951 /* 952 * In theory, we should not enable relocation here if it was disabled 953 * in SRR1, because the MMU may not be configured to support it (e.g., 954 * SLB may have been cleared). In practice, there should only be a few 955 * small windows where that's the case, and sreset is considered to 956 * be dangerous anyway. 957 */ 958EXC_REAL_END(system_reset, 0x100, 0x100) 959EXC_VIRT_NONE(0x4100, 0x100) 960 961#ifdef CONFIG_PPC_P7_NAP 962TRAMP_REAL_BEGIN(system_reset_idle_wake) 963 /* We are waking up from idle, so may clobber any volatile register */ 964 cmpwi cr1,r5,2 965 bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ 966 BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss)) 967#endif 968 969#ifdef CONFIG_PPC_PSERIES 970/* 971 * Vectors for the FWNMI option. Share common code. 972 */ 973TRAMP_REAL_BEGIN(system_reset_fwnmi) 974 GEN_INT_ENTRY system_reset, virt=0 975 976#endif /* CONFIG_PPC_PSERIES */ 977 978EXC_COMMON_BEGIN(system_reset_common) 979 __GEN_COMMON_ENTRY system_reset 980 /* 981 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able 982 * to recover, but nested NMI will notice in_nmi and not recover 983 * because of the use of the NMI stack. in_nmi reentrancy is tested in 984 * system_reset_exception. 985 */ 986 lhz r10,PACA_IN_NMI(r13) 987 addi r10,r10,1 988 sth r10,PACA_IN_NMI(r13) 989 li r10,MSR_RI 990 mtmsrd r10,1 991 992 mr r10,r1 993 ld r1,PACA_NMI_EMERG_SP(r13) 994 subi r1,r1,INT_FRAME_SIZE 995 __GEN_COMMON_BODY system_reset 996 997 addi r3,r1,STACK_FRAME_OVERHEAD 998 bl system_reset_exception 999 1000 /* Clear MSR_RI before setting SRR0 and SRR1. */ 1001 li r9,0 1002 mtmsrd r9,1 1003 1004 /* 1005 * MSR_RI is clear, now we can decrement paca->in_nmi. 1006 */ 1007 lhz r10,PACA_IN_NMI(r13) 1008 subi r10,r10,1 1009 sth r10,PACA_IN_NMI(r13) 1010 1011 kuap_kernel_restore r9, r10 1012 EXCEPTION_RESTORE_REGS 1013 RFI_TO_USER_OR_KERNEL 1014 1015 1016/** 1017 * Interrupt 0x200 - Machine Check Interrupt (MCE). 1018 * This is a non-maskable interrupt always taken in real-mode. It can be 1019 * synchronous or asynchronous, caused by hardware or software, and it may be 1020 * taken in a power-saving state. 1021 * 1022 * Handling: 1023 * Similarly to system reset, this uses its own stack and PACA save area, 1024 * the difference is re-entrancy is allowed on the machine check stack. 1025 * 1026 * machine_check_early is run in real mode, and carefully decodes the 1027 * machine check and tries to handle it (e.g., flush the SLB if there was an 1028 * error detected there), determines if it was recoverable and logs the 1029 * event. 1030 * 1031 * This early code does not "reconcile" irq soft-mask state like SRESET or 1032 * regular interrupts do, so irqs_disabled() among other things may not work 1033 * properly (irq disable/enable already doesn't work because irq tracing can 1034 * not work in real mode). 1035 * 1036 * Then, depending on the execution context when the interrupt is taken, there 1037 * are 3 main actions: 1038 * - Executing in kernel mode. The event is queued with irq_work, which means 1039 * it is handled when it is next safe to do so (i.e., the kernel has enabled 1040 * interrupts), which could be immediately when the interrupt returns. This 1041 * avoids nasty issues like switching to virtual mode when the MMU is in a 1042 * bad state, or when executing OPAL code. (SRESET is exposed to such issues, 1043 * but it has different priorities). Check to see if the CPU was in power 1044 * save, and return via the wake up code if it was. 1045 * 1046 * - Executing in user mode. machine_check_exception is run like a normal 1047 * interrupt handler, which processes the data generated by the early handler. 1048 * 1049 * - Executing in guest mode. The interrupt is run with its KVM test, and 1050 * branches to KVM to deal with. KVM may queue the event for the host 1051 * to report later. 1052 * 1053 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear, 1054 * or SCRATCH0 is in use, it may cause a crash. 1055 * 1056 * KVM: 1057 * See SRESET. 1058 */ 1059INT_DEFINE_BEGIN(machine_check_early) 1060 IVEC=0x200 1061 IAREA=PACA_EXMC 1062 IVIRT=0 /* no virt entry point */ 1063 IREALMODE_COMMON=1 1064 /* 1065 * MSR_RI is not enabled, because PACA_EXMC is being used, so a 1066 * nested machine check corrupts it. machine_check_common enables 1067 * MSR_RI. 1068 */ 1069 ISET_RI=0 1070 ISTACK=0 1071 IDAR=1 1072 IDSISR=1 1073 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ 1074INT_DEFINE_END(machine_check_early) 1075 1076INT_DEFINE_BEGIN(machine_check) 1077 IVEC=0x200 1078 IAREA=PACA_EXMC 1079 IVIRT=0 /* no virt entry point */ 1080 ISET_RI=0 1081 IDAR=1 1082 IDSISR=1 1083 IKVM_REAL=1 1084INT_DEFINE_END(machine_check) 1085 1086EXC_REAL_BEGIN(machine_check, 0x200, 0x100) 1087 GEN_INT_ENTRY machine_check_early, virt=0 1088EXC_REAL_END(machine_check, 0x200, 0x100) 1089EXC_VIRT_NONE(0x4200, 0x100) 1090 1091#ifdef CONFIG_PPC_PSERIES 1092TRAMP_REAL_BEGIN(machine_check_fwnmi) 1093 /* See comment at machine_check exception, don't turn on RI */ 1094 GEN_INT_ENTRY machine_check_early, virt=0 1095#endif 1096 1097#define MACHINE_CHECK_HANDLER_WINDUP \ 1098 /* Clear MSR_RI before setting SRR0 and SRR1. */\ 1099 li r9,0; \ 1100 mtmsrd r9,1; /* Clear MSR_RI */ \ 1101 /* Decrement paca->in_mce now RI is clear. */ \ 1102 lhz r12,PACA_IN_MCE(r13); \ 1103 subi r12,r12,1; \ 1104 sth r12,PACA_IN_MCE(r13); \ 1105 EXCEPTION_RESTORE_REGS 1106 1107EXC_COMMON_BEGIN(machine_check_early_common) 1108 __GEN_REALMODE_COMMON_ENTRY machine_check_early 1109 1110 /* 1111 * Switch to mc_emergency stack and handle re-entrancy (we limit 1112 * the nested MCE upto level 4 to avoid stack overflow). 1113 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1 1114 * 1115 * We use paca->in_mce to check whether this is the first entry or 1116 * nested machine check. We increment paca->in_mce to track nested 1117 * machine checks. 1118 * 1119 * If this is the first entry then set stack pointer to 1120 * paca->mc_emergency_sp, otherwise r1 is already pointing to 1121 * stack frame on mc_emergency stack. 1122 * 1123 * NOTE: We are here with MSR_ME=0 (off), which means we risk a 1124 * checkstop if we get another machine check exception before we do 1125 * rfid with MSR_ME=1. 1126 * 1127 * This interrupt can wake directly from idle. If that is the case, 1128 * the machine check is handled then the idle wakeup code is called 1129 * to restore state. 1130 */ 1131 lhz r10,PACA_IN_MCE(r13) 1132 cmpwi r10,0 /* Are we in nested machine check */ 1133 cmpwi cr1,r10,MAX_MCE_DEPTH /* Are we at maximum nesting */ 1134 addi r10,r10,1 /* increment paca->in_mce */ 1135 sth r10,PACA_IN_MCE(r13) 1136 1137 mr r10,r1 /* Save r1 */ 1138 bne 1f 1139 /* First machine check entry */ 1140 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */ 11411: /* Limit nested MCE to level 4 to avoid stack overflow */ 1142 bgt cr1,unrecoverable_mce /* Check if we hit limit of 4 */ 1143 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 1144 1145 __GEN_COMMON_BODY machine_check_early 1146 1147BEGIN_FTR_SECTION 1148 bl enable_machine_check 1149END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 1150 li r10,MSR_RI 1151 mtmsrd r10,1 1152 1153 addi r3,r1,STACK_FRAME_OVERHEAD 1154 bl machine_check_early 1155 std r3,RESULT(r1) /* Save result */ 1156 ld r12,_MSR(r1) 1157 1158#ifdef CONFIG_PPC_P7_NAP 1159 /* 1160 * Check if thread was in power saving mode. We come here when any 1161 * of the following is true: 1162 * a. thread wasn't in power saving mode 1163 * b. thread was in power saving mode with no state loss, 1164 * supervisor state loss or hypervisor state loss. 1165 * 1166 * Go back to nap/sleep/winkle mode again if (b) is true. 1167 */ 1168BEGIN_FTR_SECTION 1169 rlwinm. r11,r12,47-31,30,31 1170 bne machine_check_idle_common 1171END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 1172#endif 1173 1174#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1175 /* 1176 * Check if we are coming from guest. If yes, then run the normal 1177 * exception handler which will take the 1178 * machine_check_kvm->kvm_interrupt branch to deliver the MC event 1179 * to guest. 1180 */ 1181 lbz r11,HSTATE_IN_GUEST(r13) 1182 cmpwi r11,0 /* Check if coming from guest */ 1183 bne mce_deliver /* continue if we are. */ 1184#endif 1185 1186 /* 1187 * Check if we are coming from userspace. If yes, then run the normal 1188 * exception handler which will deliver the MC event to this kernel. 1189 */ 1190 andi. r11,r12,MSR_PR /* See if coming from user. */ 1191 bne mce_deliver /* continue in V mode if we are. */ 1192 1193 /* 1194 * At this point we are coming from kernel context. 1195 * Queue up the MCE event and return from the interrupt. 1196 * But before that, check if this is an un-recoverable exception. 1197 * If yes, then stay on emergency stack and panic. 1198 */ 1199 andi. r11,r12,MSR_RI 1200 beq unrecoverable_mce 1201 1202 /* 1203 * Check if we have successfully handled/recovered from error, if not 1204 * then stay on emergency stack and panic. 1205 */ 1206 ld r3,RESULT(r1) /* Load result */ 1207 cmpdi r3,0 /* see if we handled MCE successfully */ 1208 beq unrecoverable_mce /* if !handled then panic */ 1209 1210 /* 1211 * Return from MC interrupt. 1212 * Queue up the MCE event so that we can log it later, while 1213 * returning from kernel or opal call. 1214 */ 1215 bl machine_check_queue_event 1216 MACHINE_CHECK_HANDLER_WINDUP 1217 RFI_TO_KERNEL 1218 1219mce_deliver: 1220 /* 1221 * This is a host user or guest MCE. Restore all registers, then 1222 * run the "late" handler. For host user, this will run the 1223 * machine_check_exception handler in virtual mode like a normal 1224 * interrupt handler. For guest, this will trigger the KVM test 1225 * and branch to the KVM interrupt similarly to other interrupts. 1226 */ 1227BEGIN_FTR_SECTION 1228 ld r10,ORIG_GPR3(r1) 1229 mtspr SPRN_CFAR,r10 1230END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1231 MACHINE_CHECK_HANDLER_WINDUP 1232 GEN_INT_ENTRY machine_check, virt=0 1233 1234EXC_COMMON_BEGIN(machine_check_common) 1235 /* 1236 * Machine check is different because we use a different 1237 * save area: PACA_EXMC instead of PACA_EXGEN. 1238 */ 1239 GEN_COMMON machine_check 1240 1241 /* Enable MSR_RI when finished with PACA_EXMC */ 1242 li r10,MSR_RI 1243 mtmsrd r10,1 1244 addi r3,r1,STACK_FRAME_OVERHEAD 1245 bl machine_check_exception 1246 b interrupt_return_srr 1247 1248 1249#ifdef CONFIG_PPC_P7_NAP 1250/* 1251 * This is an idle wakeup. Low level machine check has already been 1252 * done. Queue the event then call the idle code to do the wake up. 1253 */ 1254EXC_COMMON_BEGIN(machine_check_idle_common) 1255 bl machine_check_queue_event 1256 1257 /* 1258 * GPR-loss wakeups are relatively straightforward, because the 1259 * idle sleep code has saved all non-volatile registers on its 1260 * own stack, and r1 in PACAR1. 1261 * 1262 * For no-loss wakeups the r1 and lr registers used by the 1263 * early machine check handler have to be restored first. r2 is 1264 * the kernel TOC, so no need to restore it. 1265 * 1266 * Then decrement MCE nesting after finishing with the stack. 1267 */ 1268 ld r3,_MSR(r1) 1269 ld r4,_LINK(r1) 1270 ld r1,GPR1(r1) 1271 1272 lhz r11,PACA_IN_MCE(r13) 1273 subi r11,r11,1 1274 sth r11,PACA_IN_MCE(r13) 1275 1276 mtlr r4 1277 rlwinm r10,r3,47-31,30,31 1278 cmpwi cr1,r10,2 1279 bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ 1280 b idle_return_gpr_loss 1281#endif 1282 1283EXC_COMMON_BEGIN(unrecoverable_mce) 1284 /* 1285 * We are going down. But there are chances that we might get hit by 1286 * another MCE during panic path and we may run into unstable state 1287 * with no way out. Hence, turn ME bit off while going down, so that 1288 * when another MCE is hit during panic path, system will checkstop 1289 * and hypervisor will get restarted cleanly by SP. 1290 */ 1291BEGIN_FTR_SECTION 1292 li r10,0 /* clear MSR_RI */ 1293 mtmsrd r10,1 1294 bl disable_machine_check 1295END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 1296 ld r10,PACAKMSR(r13) 1297 li r3,MSR_ME 1298 andc r10,r10,r3 1299 mtmsrd r10 1300 1301 lhz r12,PACA_IN_MCE(r13) 1302 subi r12,r12,1 1303 sth r12,PACA_IN_MCE(r13) 1304 1305 /* Invoke machine_check_exception to print MCE event and panic. */ 1306 addi r3,r1,STACK_FRAME_OVERHEAD 1307 bl machine_check_exception 1308 1309 /* 1310 * We will not reach here. Even if we did, there is no way out. 1311 * Call unrecoverable_exception and die. 1312 */ 1313 addi r3,r1,STACK_FRAME_OVERHEAD 1314 bl unrecoverable_exception 1315 b . 1316 1317 1318/** 1319 * Interrupt 0x300 - Data Storage Interrupt (DSI). 1320 * This is a synchronous interrupt generated due to a data access exception, 1321 * e.g., a load orstore which does not have a valid page table entry with 1322 * permissions. DAWR matches also fault here, as do RC updates, and minor misc 1323 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc. 1324 * 1325 * Handling: 1326 * - Hash MMU 1327 * Go to do_hash_fault, which attempts to fill the HPT from an entry in the 1328 * Linux page table. Hash faults can hit in kernel mode in a fairly 1329 * arbitrary state (e.g., interrupts disabled, locks held) when accessing 1330 * "non-bolted" regions, e.g., vmalloc space. However these should always be 1331 * backed by Linux page table entries. 1332 * 1333 * If no entry is found the Linux page fault handler is invoked (by 1334 * do_hash_fault). Linux page faults can happen in kernel mode due to user 1335 * copy operations of course. 1336 * 1337 * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest 1338 * MMU context, which may cause a DSI in the host, which must go to the 1339 * KVM handler. MSR[IR] is not enabled, so the real-mode handler will 1340 * always be used regardless of AIL setting. 1341 * 1342 * - Radix MMU 1343 * The hardware loads from the Linux page table directly, so a fault goes 1344 * immediately to Linux page fault. 1345 * 1346 * Conditions like DAWR match are handled on the way in to Linux page fault. 1347 */ 1348INT_DEFINE_BEGIN(data_access) 1349 IVEC=0x300 1350 IDAR=1 1351 IDSISR=1 1352 IKVM_REAL=1 1353INT_DEFINE_END(data_access) 1354 1355EXC_REAL_BEGIN(data_access, 0x300, 0x80) 1356 GEN_INT_ENTRY data_access, virt=0 1357EXC_REAL_END(data_access, 0x300, 0x80) 1358EXC_VIRT_BEGIN(data_access, 0x4300, 0x80) 1359 GEN_INT_ENTRY data_access, virt=1 1360EXC_VIRT_END(data_access, 0x4300, 0x80) 1361EXC_COMMON_BEGIN(data_access_common) 1362 GEN_COMMON data_access 1363 ld r4,_DSISR(r1) 1364 addi r3,r1,STACK_FRAME_OVERHEAD 1365 andis. r0,r4,DSISR_DABRMATCH@h 1366 bne- 1f 1367BEGIN_MMU_FTR_SECTION 1368 bl do_hash_fault 1369MMU_FTR_SECTION_ELSE 1370 bl do_page_fault 1371ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1372 b interrupt_return_srr 1373 13741: bl do_break 1375 /* 1376 * do_break() may have changed the NV GPRS while handling a breakpoint. 1377 * If so, we need to restore them with their updated values. 1378 */ 1379 REST_NVGPRS(r1) 1380 b interrupt_return_srr 1381 1382 1383/** 1384 * Interrupt 0x380 - Data Segment Interrupt (DSLB). 1385 * This is a synchronous interrupt in response to an MMU fault missing SLB 1386 * entry for HPT, or an address outside RPT translation range. 1387 * 1388 * Handling: 1389 * - HPT: 1390 * This refills the SLB, or reports an access fault similarly to a bad page 1391 * fault. When coming from user-mode, the SLB handler may access any kernel 1392 * data, though it may itself take a DSLB. When coming from kernel mode, 1393 * recursive faults must be avoided so access is restricted to the kernel 1394 * image text/data, kernel stack, and any data allocated below 1395 * ppc64_bolted_size (first segment). The kernel handler must avoid stomping 1396 * on user-handler data structures. 1397 * 1398 * KVM: Same as 0x300, DSLB must test for KVM guest. 1399 */ 1400INT_DEFINE_BEGIN(data_access_slb) 1401 IVEC=0x380 1402 IDAR=1 1403 IKVM_REAL=1 1404INT_DEFINE_END(data_access_slb) 1405 1406EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) 1407 GEN_INT_ENTRY data_access_slb, virt=0 1408EXC_REAL_END(data_access_slb, 0x380, 0x80) 1409EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) 1410 GEN_INT_ENTRY data_access_slb, virt=1 1411EXC_VIRT_END(data_access_slb, 0x4380, 0x80) 1412EXC_COMMON_BEGIN(data_access_slb_common) 1413 GEN_COMMON data_access_slb 1414BEGIN_MMU_FTR_SECTION 1415 /* HPT case, do SLB fault */ 1416 addi r3,r1,STACK_FRAME_OVERHEAD 1417 bl do_slb_fault 1418 cmpdi r3,0 1419 bne- 1f 1420 b fast_interrupt_return_srr 14211: /* Error case */ 1422MMU_FTR_SECTION_ELSE 1423 /* Radix case, access is outside page table range */ 1424 li r3,-EFAULT 1425ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1426 std r3,RESULT(r1) 1427 addi r3,r1,STACK_FRAME_OVERHEAD 1428 bl do_bad_slb_fault 1429 b interrupt_return_srr 1430 1431 1432/** 1433 * Interrupt 0x400 - Instruction Storage Interrupt (ISI). 1434 * This is a synchronous interrupt in response to an MMU fault due to an 1435 * instruction fetch. 1436 * 1437 * Handling: 1438 * Similar to DSI, though in response to fetch. The faulting address is found 1439 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR). 1440 */ 1441INT_DEFINE_BEGIN(instruction_access) 1442 IVEC=0x400 1443 IISIDE=1 1444 IDAR=1 1445 IDSISR=1 1446#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1447 IKVM_REAL=1 1448#endif 1449INT_DEFINE_END(instruction_access) 1450 1451EXC_REAL_BEGIN(instruction_access, 0x400, 0x80) 1452 GEN_INT_ENTRY instruction_access, virt=0 1453EXC_REAL_END(instruction_access, 0x400, 0x80) 1454EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80) 1455 GEN_INT_ENTRY instruction_access, virt=1 1456EXC_VIRT_END(instruction_access, 0x4400, 0x80) 1457EXC_COMMON_BEGIN(instruction_access_common) 1458 GEN_COMMON instruction_access 1459 addi r3,r1,STACK_FRAME_OVERHEAD 1460BEGIN_MMU_FTR_SECTION 1461 bl do_hash_fault 1462MMU_FTR_SECTION_ELSE 1463 bl do_page_fault 1464ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1465 b interrupt_return_srr 1466 1467 1468/** 1469 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB). 1470 * This is a synchronous interrupt in response to an MMU fault due to an 1471 * instruction fetch. 1472 * 1473 * Handling: 1474 * Similar to DSLB, though in response to fetch. The faulting address is found 1475 * in SRR0 (rather than DAR). 1476 */ 1477INT_DEFINE_BEGIN(instruction_access_slb) 1478 IVEC=0x480 1479 IISIDE=1 1480 IDAR=1 1481#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1482 IKVM_REAL=1 1483#endif 1484INT_DEFINE_END(instruction_access_slb) 1485 1486EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80) 1487 GEN_INT_ENTRY instruction_access_slb, virt=0 1488EXC_REAL_END(instruction_access_slb, 0x480, 0x80) 1489EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) 1490 GEN_INT_ENTRY instruction_access_slb, virt=1 1491EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80) 1492EXC_COMMON_BEGIN(instruction_access_slb_common) 1493 GEN_COMMON instruction_access_slb 1494BEGIN_MMU_FTR_SECTION 1495 /* HPT case, do SLB fault */ 1496 addi r3,r1,STACK_FRAME_OVERHEAD 1497 bl do_slb_fault 1498 cmpdi r3,0 1499 bne- 1f 1500 b fast_interrupt_return_srr 15011: /* Error case */ 1502MMU_FTR_SECTION_ELSE 1503 /* Radix case, access is outside page table range */ 1504 li r3,-EFAULT 1505ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1506 std r3,RESULT(r1) 1507 addi r3,r1,STACK_FRAME_OVERHEAD 1508 bl do_bad_slb_fault 1509 b interrupt_return_srr 1510 1511 1512/** 1513 * Interrupt 0x500 - External Interrupt. 1514 * This is an asynchronous maskable interrupt in response to an "external 1515 * exception" from the interrupt controller or hypervisor (e.g., device 1516 * interrupt). It is maskable in hardware by clearing MSR[EE], and 1517 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()). 1518 * 1519 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that 1520 * interrupts are delivered with HSRR registers, guests use SRRs, which 1521 * reqiures IHSRR_IF_HVMODE. 1522 * 1523 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that 1524 * external interrupts are delivered as Hypervisor Virtualization Interrupts 1525 * rather than External Interrupts. 1526 * 1527 * Handling: 1528 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead, 1529 * because registers at the time of the interrupt are not so important as it is 1530 * asynchronous. 1531 * 1532 * If soft masked, the masked handler will note the pending interrupt for 1533 * replay, and clear MSR[EE] in the interrupted context. 1534 */ 1535INT_DEFINE_BEGIN(hardware_interrupt) 1536 IVEC=0x500 1537 IHSRR_IF_HVMODE=1 1538 IMASK=IRQS_DISABLED 1539 IKVM_REAL=1 1540 IKVM_VIRT=1 1541INT_DEFINE_END(hardware_interrupt) 1542 1543EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100) 1544 GEN_INT_ENTRY hardware_interrupt, virt=0 1545EXC_REAL_END(hardware_interrupt, 0x500, 0x100) 1546EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100) 1547 GEN_INT_ENTRY hardware_interrupt, virt=1 1548EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) 1549EXC_COMMON_BEGIN(hardware_interrupt_common) 1550 GEN_COMMON hardware_interrupt 1551 addi r3,r1,STACK_FRAME_OVERHEAD 1552 bl do_IRQ 1553 BEGIN_FTR_SECTION 1554 b interrupt_return_hsrr 1555 FTR_SECTION_ELSE 1556 b interrupt_return_srr 1557 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 1558 1559 1560/** 1561 * Interrupt 0x600 - Alignment Interrupt 1562 * This is a synchronous interrupt in response to data alignment fault. 1563 */ 1564INT_DEFINE_BEGIN(alignment) 1565 IVEC=0x600 1566 IDAR=1 1567 IDSISR=1 1568#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1569 IKVM_REAL=1 1570#endif 1571INT_DEFINE_END(alignment) 1572 1573EXC_REAL_BEGIN(alignment, 0x600, 0x100) 1574 GEN_INT_ENTRY alignment, virt=0 1575EXC_REAL_END(alignment, 0x600, 0x100) 1576EXC_VIRT_BEGIN(alignment, 0x4600, 0x100) 1577 GEN_INT_ENTRY alignment, virt=1 1578EXC_VIRT_END(alignment, 0x4600, 0x100) 1579EXC_COMMON_BEGIN(alignment_common) 1580 GEN_COMMON alignment 1581 addi r3,r1,STACK_FRAME_OVERHEAD 1582 bl alignment_exception 1583 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 1584 b interrupt_return_srr 1585 1586 1587/** 1588 * Interrupt 0x700 - Program Interrupt (program check). 1589 * This is a synchronous interrupt in response to various instruction faults: 1590 * traps, privilege errors, TM errors, floating point exceptions. 1591 * 1592 * Handling: 1593 * This interrupt may use the "emergency stack" in some cases when being taken 1594 * from kernel context, which complicates handling. 1595 */ 1596INT_DEFINE_BEGIN(program_check) 1597 IVEC=0x700 1598#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1599 IKVM_REAL=1 1600#endif 1601INT_DEFINE_END(program_check) 1602 1603EXC_REAL_BEGIN(program_check, 0x700, 0x100) 1604 1605#ifdef CONFIG_CPU_LITTLE_ENDIAN 1606 /* 1607 * There's a short window during boot where although the kernel is 1608 * running little endian, any exceptions will cause the CPU to switch 1609 * back to big endian. For example a WARN() boils down to a trap 1610 * instruction, which will cause a program check, and we end up here but 1611 * with the CPU in big endian mode. The first instruction of the program 1612 * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when 1613 * executed in the wrong endian is an lhzu with a ~3GB displacement from 1614 * r3. The content of r3 is random, so that is a load from some random 1615 * location, and depending on the system can easily lead to a checkstop, 1616 * or an infinitely recursive page fault. 1617 * 1618 * So to handle that case we have a trampoline here that can detect we 1619 * are in the wrong endian and flip us back to the correct endian. We 1620 * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires 1621 * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as 1622 * SPRG1 is already used for the paca. SPRG3 is user readable, but this 1623 * trampoline is only active very early in boot, and SPRG3 will be 1624 * reinitialised in vdso_getcpu_init() before userspace starts. 1625 */ 1626BEGIN_FTR_SECTION 1627 tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8 1628 b 1f // Skip trampoline if endian is correct 1629 .long 0xa643707d // mtsprg 0, r11 Backup r11 1630 .long 0xa6027a7d // mfsrr0 r11 1631 .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2 1632 .long 0xa6027b7d // mfsrr1 r11 1633 .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3 1634 .long 0xa600607d // mfmsr r11 1635 .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE] 1636 .long 0xa6037b7d // mtsrr1 r11 1637 .long 0x34076039 // li r11, 0x734 1638 .long 0xa6037a7d // mtsrr0 r11 1639 .long 0x2400004c // rfid 1640 mfsprg r11, 3 1641 mtsrr1 r11 // Restore SRR1 1642 mfsprg r11, 2 1643 mtsrr0 r11 // Restore SRR0 1644 mfsprg r11, 0 // Restore r11 16451: 1646END_FTR_SECTION(0, 1) // nop out after boot 1647#endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1648 1649 GEN_INT_ENTRY program_check, virt=0 1650EXC_REAL_END(program_check, 0x700, 0x100) 1651EXC_VIRT_BEGIN(program_check, 0x4700, 0x100) 1652 GEN_INT_ENTRY program_check, virt=1 1653EXC_VIRT_END(program_check, 0x4700, 0x100) 1654EXC_COMMON_BEGIN(program_check_common) 1655 __GEN_COMMON_ENTRY program_check 1656 1657 /* 1658 * It's possible to receive a TM Bad Thing type program check with 1659 * userspace register values (in particular r1), but with SRR1 reporting 1660 * that we came from the kernel. Normally that would confuse the bad 1661 * stack logic, and we would report a bad kernel stack pointer. Instead 1662 * we switch to the emergency stack if we're taking a TM Bad Thing from 1663 * the kernel. 1664 */ 1665 1666 andi. r10,r12,MSR_PR 1667 bne 2f /* If userspace, go normal path */ 1668 1669 andis. r10,r12,(SRR1_PROGTM)@h 1670 bne 1f /* If TM, emergency */ 1671 1672 cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */ 1673 blt 2f /* normal path if not */ 1674 1675 /* Use the emergency stack */ 16761: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */ 1677 /* 3 in EXCEPTION_PROLOG_COMMON */ 1678 mr r10,r1 /* Save r1 */ 1679 ld r1,PACAEMERGSP(r13) /* Use emergency stack */ 1680 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 1681 __ISTACK(program_check)=0 1682 __GEN_COMMON_BODY program_check 1683 b 3f 16842: 1685 __ISTACK(program_check)=1 1686 __GEN_COMMON_BODY program_check 16873: 1688 addi r3,r1,STACK_FRAME_OVERHEAD 1689 bl program_check_exception 1690 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 1691 b interrupt_return_srr 1692 1693 1694/* 1695 * Interrupt 0x800 - Floating-Point Unavailable Interrupt. 1696 * This is a synchronous interrupt in response to executing an fp instruction 1697 * with MSR[FP]=0. 1698 * 1699 * Handling: 1700 * This will load FP registers and enable the FP bit if coming from userspace, 1701 * otherwise report a bad kernel use of FP. 1702 */ 1703INT_DEFINE_BEGIN(fp_unavailable) 1704 IVEC=0x800 1705#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1706 IKVM_REAL=1 1707#endif 1708INT_DEFINE_END(fp_unavailable) 1709 1710EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100) 1711 GEN_INT_ENTRY fp_unavailable, virt=0 1712EXC_REAL_END(fp_unavailable, 0x800, 0x100) 1713EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100) 1714 GEN_INT_ENTRY fp_unavailable, virt=1 1715EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) 1716EXC_COMMON_BEGIN(fp_unavailable_common) 1717 GEN_COMMON fp_unavailable 1718 bne 1f /* if from user, just load it up */ 1719 addi r3,r1,STACK_FRAME_OVERHEAD 1720 bl kernel_fp_unavailable_exception 17210: trap 1722 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 17231: 1724#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1725BEGIN_FTR_SECTION 1726 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 1727 * transaction), go do TM stuff 1728 */ 1729 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 1730 bne- 2f 1731END_FTR_SECTION_IFSET(CPU_FTR_TM) 1732#endif 1733 bl load_up_fpu 1734 b fast_interrupt_return_srr 1735#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 17362: /* User process was in a transaction */ 1737 addi r3,r1,STACK_FRAME_OVERHEAD 1738 bl fp_unavailable_tm 1739 b interrupt_return_srr 1740#endif 1741 1742 1743/** 1744 * Interrupt 0x900 - Decrementer Interrupt. 1745 * This is an asynchronous interrupt in response to a decrementer exception 1746 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing 1747 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e., 1748 * local_irq_disable()). 1749 * 1750 * Handling: 1751 * This calls into Linux timer handler. NVGPRs are not saved (see 0x500). 1752 * 1753 * If soft masked, the masked handler will note the pending interrupt for 1754 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled 1755 * in the interrupted context. 1756 * If PPC_WATCHDOG is configured, the soft masked handler will actually set 1757 * things back up to run soft_nmi_interrupt as a regular interrupt handler 1758 * on the emergency stack. 1759 */ 1760INT_DEFINE_BEGIN(decrementer) 1761 IVEC=0x900 1762 IMASK=IRQS_DISABLED 1763#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1764 IKVM_REAL=1 1765#endif 1766INT_DEFINE_END(decrementer) 1767 1768EXC_REAL_BEGIN(decrementer, 0x900, 0x80) 1769 GEN_INT_ENTRY decrementer, virt=0 1770EXC_REAL_END(decrementer, 0x900, 0x80) 1771EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) 1772 GEN_INT_ENTRY decrementer, virt=1 1773EXC_VIRT_END(decrementer, 0x4900, 0x80) 1774EXC_COMMON_BEGIN(decrementer_common) 1775 GEN_COMMON decrementer 1776 addi r3,r1,STACK_FRAME_OVERHEAD 1777 bl timer_interrupt 1778 b interrupt_return_srr 1779 1780 1781/** 1782 * Interrupt 0x980 - Hypervisor Decrementer Interrupt. 1783 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC 1784 * register. 1785 * 1786 * Handling: 1787 * Linux does not use this outside KVM where it's used to keep a host timer 1788 * while the guest is given control of DEC. It should normally be caught by 1789 * the KVM test and routed there. 1790 */ 1791INT_DEFINE_BEGIN(hdecrementer) 1792 IVEC=0x980 1793 IHSRR=1 1794 ISTACK=0 1795 IKVM_REAL=1 1796 IKVM_VIRT=1 1797INT_DEFINE_END(hdecrementer) 1798 1799EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) 1800 GEN_INT_ENTRY hdecrementer, virt=0 1801EXC_REAL_END(hdecrementer, 0x980, 0x80) 1802EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) 1803 GEN_INT_ENTRY hdecrementer, virt=1 1804EXC_VIRT_END(hdecrementer, 0x4980, 0x80) 1805EXC_COMMON_BEGIN(hdecrementer_common) 1806 __GEN_COMMON_ENTRY hdecrementer 1807 /* 1808 * Hypervisor decrementer interrupts not caught by the KVM test 1809 * shouldn't occur but are sometimes left pending on exit from a KVM 1810 * guest. We don't need to do anything to clear them, as they are 1811 * edge-triggered. 1812 * 1813 * Be careful to avoid touching the kernel stack. 1814 */ 1815 li r10,0 1816 stb r10,PACAHSRR_VALID(r13) 1817 ld r10,PACA_EXGEN+EX_CTR(r13) 1818 mtctr r10 1819 mtcrf 0x80,r9 1820 ld r9,PACA_EXGEN+EX_R9(r13) 1821 ld r10,PACA_EXGEN+EX_R10(r13) 1822 ld r11,PACA_EXGEN+EX_R11(r13) 1823 ld r12,PACA_EXGEN+EX_R12(r13) 1824 ld r13,PACA_EXGEN+EX_R13(r13) 1825 HRFI_TO_KERNEL 1826 1827 1828/** 1829 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt. 1830 * This is an asynchronous interrupt in response to a msgsndp doorbell. 1831 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 1832 * IRQS_DISABLED mask (i.e., local_irq_disable()). 1833 * 1834 * Handling: 1835 * Guests may use this for IPIs between threads in a core if the 1836 * hypervisor supports it. NVGPRS are not saved (see 0x500). 1837 * 1838 * If soft masked, the masked handler will note the pending interrupt for 1839 * replay, leaving MSR[EE] enabled in the interrupted context because the 1840 * doorbells are edge triggered. 1841 */ 1842INT_DEFINE_BEGIN(doorbell_super) 1843 IVEC=0xa00 1844 IMASK=IRQS_DISABLED 1845#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1846 IKVM_REAL=1 1847#endif 1848INT_DEFINE_END(doorbell_super) 1849 1850EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100) 1851 GEN_INT_ENTRY doorbell_super, virt=0 1852EXC_REAL_END(doorbell_super, 0xa00, 0x100) 1853EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) 1854 GEN_INT_ENTRY doorbell_super, virt=1 1855EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) 1856EXC_COMMON_BEGIN(doorbell_super_common) 1857 GEN_COMMON doorbell_super 1858 addi r3,r1,STACK_FRAME_OVERHEAD 1859#ifdef CONFIG_PPC_DOORBELL 1860 bl doorbell_exception 1861#else 1862 bl unknown_async_exception 1863#endif 1864 b interrupt_return_srr 1865 1866 1867EXC_REAL_NONE(0xb00, 0x100) 1868EXC_VIRT_NONE(0x4b00, 0x100) 1869 1870/** 1871 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall). 1872 * This is a synchronous interrupt invoked with the "sc" instruction. The 1873 * system call is invoked with "sc 0" and does not alter the HV bit, so it 1874 * is directed to the currently running OS. The hypercall is invoked with 1875 * "sc 1" and it sets HV=1, so it elevates to hypervisor. 1876 * 1877 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to 1878 * 0x4c00 virtual mode. 1879 * 1880 * Handling: 1881 * If the KVM test fires then it was due to a hypercall and is accordingly 1882 * routed to KVM. Otherwise this executes a normal Linux system call. 1883 * 1884 * Call convention: 1885 * 1886 * syscall and hypercalls register conventions are documented in 1887 * Documentation/powerpc/syscall64-abi.rst and 1888 * Documentation/powerpc/papr_hcalls.rst respectively. 1889 * 1890 * The intersection of volatile registers that don't contain possible 1891 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry 1892 * without saving, though xer is not a good idea to use, as hardware may 1893 * interpret some bits so it may be costly to change them. 1894 */ 1895INT_DEFINE_BEGIN(system_call) 1896 IVEC=0xc00 1897 IKVM_REAL=1 1898 IKVM_VIRT=1 1899INT_DEFINE_END(system_call) 1900 1901.macro SYSTEM_CALL virt 1902#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1903 /* 1904 * There is a little bit of juggling to get syscall and hcall 1905 * working well. Save r13 in ctr to avoid using SPRG scratch 1906 * register. 1907 * 1908 * Userspace syscalls have already saved the PPR, hcalls must save 1909 * it before setting HMT_MEDIUM. 1910 */ 1911 mtctr r13 1912 GET_PACA(r13) 1913 std r10,PACA_EXGEN+EX_R10(r13) 1914 INTERRUPT_TO_KERNEL 1915 KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */ 1916 mfctr r9 1917#else 1918 mr r9,r13 1919 GET_PACA(r13) 1920 INTERRUPT_TO_KERNEL 1921#endif 1922 1923#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 1924BEGIN_FTR_SECTION 1925 cmpdi r0,0x1ebe 1926 beq- 1f 1927END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) 1928#endif 1929 1930 /* We reach here with PACA in r13, r13 in r9. */ 1931 mfspr r11,SPRN_SRR0 1932 mfspr r12,SPRN_SRR1 1933 1934 HMT_MEDIUM 1935 1936 .if ! \virt 1937 __LOAD_HANDLER(r10, system_call_common_real) 1938 mtctr r10 1939 bctr 1940 .else 1941#ifdef CONFIG_RELOCATABLE 1942 __LOAD_HANDLER(r10, system_call_common) 1943 mtctr r10 1944 bctr 1945#else 1946 b system_call_common 1947#endif 1948 .endif 1949 1950#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 1951 /* Fast LE/BE switch system call */ 19521: mfspr r12,SPRN_SRR1 1953 xori r12,r12,MSR_LE 1954 mtspr SPRN_SRR1,r12 1955 mr r13,r9 1956 RFI_TO_USER /* return to userspace */ 1957 b . /* prevent speculative execution */ 1958#endif 1959.endm 1960 1961EXC_REAL_BEGIN(system_call, 0xc00, 0x100) 1962 SYSTEM_CALL 0 1963EXC_REAL_END(system_call, 0xc00, 0x100) 1964EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100) 1965 SYSTEM_CALL 1 1966EXC_VIRT_END(system_call, 0x4c00, 0x100) 1967 1968#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 1969TRAMP_REAL_BEGIN(kvm_hcall) 1970 std r9,PACA_EXGEN+EX_R9(r13) 1971 std r11,PACA_EXGEN+EX_R11(r13) 1972 std r12,PACA_EXGEN+EX_R12(r13) 1973 mfcr r9 1974 mfctr r10 1975 std r10,PACA_EXGEN+EX_R13(r13) 1976 li r10,0 1977 std r10,PACA_EXGEN+EX_CFAR(r13) 1978 std r10,PACA_EXGEN+EX_CTR(r13) 1979 /* 1980 * Save the PPR (on systems that support it) before changing to 1981 * HMT_MEDIUM. That allows the KVM code to save that value into the 1982 * guest state (it is the guest's PPR value). 1983 */ 1984BEGIN_FTR_SECTION 1985 mfspr r10,SPRN_PPR 1986 std r10,PACA_EXGEN+EX_PPR(r13) 1987END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1988 1989 HMT_MEDIUM 1990 1991#ifdef CONFIG_RELOCATABLE 1992 /* 1993 * Requires __LOAD_FAR_HANDLER beause kvmppc_hcall lives 1994 * outside the head section. 1995 */ 1996 __LOAD_FAR_HANDLER(r10, kvmppc_hcall) 1997 mtctr r10 1998 bctr 1999#else 2000 b kvmppc_hcall 2001#endif 2002#endif 2003 2004/** 2005 * Interrupt 0xd00 - Trace Interrupt. 2006 * This is a synchronous interrupt in response to instruction step or 2007 * breakpoint faults. 2008 */ 2009INT_DEFINE_BEGIN(single_step) 2010 IVEC=0xd00 2011#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2012 IKVM_REAL=1 2013#endif 2014INT_DEFINE_END(single_step) 2015 2016EXC_REAL_BEGIN(single_step, 0xd00, 0x100) 2017 GEN_INT_ENTRY single_step, virt=0 2018EXC_REAL_END(single_step, 0xd00, 0x100) 2019EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100) 2020 GEN_INT_ENTRY single_step, virt=1 2021EXC_VIRT_END(single_step, 0x4d00, 0x100) 2022EXC_COMMON_BEGIN(single_step_common) 2023 GEN_COMMON single_step 2024 addi r3,r1,STACK_FRAME_OVERHEAD 2025 bl single_step_exception 2026 b interrupt_return_srr 2027 2028 2029/** 2030 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI). 2031 * This is a synchronous interrupt in response to an MMU fault caused by a 2032 * guest data access. 2033 * 2034 * Handling: 2035 * This should always get routed to KVM. In radix MMU mode, this is caused 2036 * by a guest nested radix access that can't be performed due to the 2037 * partition scope page table. In hash mode, this can be caused by guests 2038 * running with translation disabled (virtual real mode) or with VPM enabled. 2039 * KVM will update the page table structures or disallow the access. 2040 */ 2041INT_DEFINE_BEGIN(h_data_storage) 2042 IVEC=0xe00 2043 IHSRR=1 2044 IDAR=1 2045 IDSISR=1 2046 IKVM_REAL=1 2047 IKVM_VIRT=1 2048INT_DEFINE_END(h_data_storage) 2049 2050EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20) 2051 GEN_INT_ENTRY h_data_storage, virt=0, ool=1 2052EXC_REAL_END(h_data_storage, 0xe00, 0x20) 2053EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20) 2054 GEN_INT_ENTRY h_data_storage, virt=1, ool=1 2055EXC_VIRT_END(h_data_storage, 0x4e00, 0x20) 2056EXC_COMMON_BEGIN(h_data_storage_common) 2057 GEN_COMMON h_data_storage 2058 addi r3,r1,STACK_FRAME_OVERHEAD 2059BEGIN_MMU_FTR_SECTION 2060 bl do_bad_page_fault_segv 2061MMU_FTR_SECTION_ELSE 2062 bl unknown_exception 2063ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) 2064 b interrupt_return_hsrr 2065 2066 2067/** 2068 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI). 2069 * This is a synchronous interrupt in response to an MMU fault caused by a 2070 * guest instruction fetch, similar to HDSI. 2071 */ 2072INT_DEFINE_BEGIN(h_instr_storage) 2073 IVEC=0xe20 2074 IHSRR=1 2075 IKVM_REAL=1 2076 IKVM_VIRT=1 2077INT_DEFINE_END(h_instr_storage) 2078 2079EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20) 2080 GEN_INT_ENTRY h_instr_storage, virt=0, ool=1 2081EXC_REAL_END(h_instr_storage, 0xe20, 0x20) 2082EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20) 2083 GEN_INT_ENTRY h_instr_storage, virt=1, ool=1 2084EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20) 2085EXC_COMMON_BEGIN(h_instr_storage_common) 2086 GEN_COMMON h_instr_storage 2087 addi r3,r1,STACK_FRAME_OVERHEAD 2088 bl unknown_exception 2089 b interrupt_return_hsrr 2090 2091 2092/** 2093 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt. 2094 */ 2095INT_DEFINE_BEGIN(emulation_assist) 2096 IVEC=0xe40 2097 IHSRR=1 2098 IKVM_REAL=1 2099 IKVM_VIRT=1 2100INT_DEFINE_END(emulation_assist) 2101 2102EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20) 2103 GEN_INT_ENTRY emulation_assist, virt=0, ool=1 2104EXC_REAL_END(emulation_assist, 0xe40, 0x20) 2105EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20) 2106 GEN_INT_ENTRY emulation_assist, virt=1, ool=1 2107EXC_VIRT_END(emulation_assist, 0x4e40, 0x20) 2108EXC_COMMON_BEGIN(emulation_assist_common) 2109 GEN_COMMON emulation_assist 2110 addi r3,r1,STACK_FRAME_OVERHEAD 2111 bl emulation_assist_interrupt 2112 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2113 b interrupt_return_hsrr 2114 2115 2116/** 2117 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI). 2118 * This is an asynchronous interrupt caused by a Hypervisor Maintenance 2119 * Exception. It is always taken in real mode but uses HSRR registers 2120 * unlike SRESET and MCE. 2121 * 2122 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable 2123 * with IRQS_DISABLED mask (i.e., local_irq_disable()). 2124 * 2125 * Handling: 2126 * This is a special case, this is handled similarly to machine checks, with an 2127 * initial real mode handler that is not soft-masked, which attempts to fix the 2128 * problem. Then a regular handler which is soft-maskable and reports the 2129 * problem. 2130 * 2131 * The emergency stack is used for the early real mode handler. 2132 * 2133 * XXX: unclear why MCE and HMI schemes could not be made common, e.g., 2134 * either use soft-masking for the MCE, or use irq_work for the HMI. 2135 * 2136 * KVM: 2137 * Unlike MCE, this calls into KVM without calling the real mode handler 2138 * first. 2139 */ 2140INT_DEFINE_BEGIN(hmi_exception_early) 2141 IVEC=0xe60 2142 IHSRR=1 2143 IREALMODE_COMMON=1 2144 ISTACK=0 2145 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ 2146 IKVM_REAL=1 2147INT_DEFINE_END(hmi_exception_early) 2148 2149INT_DEFINE_BEGIN(hmi_exception) 2150 IVEC=0xe60 2151 IHSRR=1 2152 IMASK=IRQS_DISABLED 2153 IKVM_REAL=1 2154INT_DEFINE_END(hmi_exception) 2155 2156EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20) 2157 GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1 2158EXC_REAL_END(hmi_exception, 0xe60, 0x20) 2159EXC_VIRT_NONE(0x4e60, 0x20) 2160 2161EXC_COMMON_BEGIN(hmi_exception_early_common) 2162 __GEN_REALMODE_COMMON_ENTRY hmi_exception_early 2163 2164 mr r10,r1 /* Save r1 */ 2165 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */ 2166 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 2167 2168 __GEN_COMMON_BODY hmi_exception_early 2169 2170 addi r3,r1,STACK_FRAME_OVERHEAD 2171 bl hmi_exception_realmode 2172 cmpdi cr0,r3,0 2173 bne 1f 2174 2175 EXCEPTION_RESTORE_REGS hsrr=1 2176 HRFI_TO_USER_OR_KERNEL 2177 21781: 2179 /* 2180 * Go to virtual mode and pull the HMI event information from 2181 * firmware. 2182 */ 2183 EXCEPTION_RESTORE_REGS hsrr=1 2184 GEN_INT_ENTRY hmi_exception, virt=0 2185 2186EXC_COMMON_BEGIN(hmi_exception_common) 2187 GEN_COMMON hmi_exception 2188 addi r3,r1,STACK_FRAME_OVERHEAD 2189 bl handle_hmi_exception 2190 b interrupt_return_hsrr 2191 2192 2193/** 2194 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt. 2195 * This is an asynchronous interrupt in response to a msgsnd doorbell. 2196 * Similar to the 0xa00 doorbell but for host rather than guest. 2197 */ 2198INT_DEFINE_BEGIN(h_doorbell) 2199 IVEC=0xe80 2200 IHSRR=1 2201 IMASK=IRQS_DISABLED 2202 IKVM_REAL=1 2203 IKVM_VIRT=1 2204INT_DEFINE_END(h_doorbell) 2205 2206EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20) 2207 GEN_INT_ENTRY h_doorbell, virt=0, ool=1 2208EXC_REAL_END(h_doorbell, 0xe80, 0x20) 2209EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20) 2210 GEN_INT_ENTRY h_doorbell, virt=1, ool=1 2211EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) 2212EXC_COMMON_BEGIN(h_doorbell_common) 2213 GEN_COMMON h_doorbell 2214 addi r3,r1,STACK_FRAME_OVERHEAD 2215#ifdef CONFIG_PPC_DOORBELL 2216 bl doorbell_exception 2217#else 2218 bl unknown_async_exception 2219#endif 2220 b interrupt_return_hsrr 2221 2222 2223/** 2224 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt. 2225 * This is an asynchronous interrupt in response to an "external exception". 2226 * Similar to 0x500 but for host only. 2227 */ 2228INT_DEFINE_BEGIN(h_virt_irq) 2229 IVEC=0xea0 2230 IHSRR=1 2231 IMASK=IRQS_DISABLED 2232 IKVM_REAL=1 2233 IKVM_VIRT=1 2234INT_DEFINE_END(h_virt_irq) 2235 2236EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20) 2237 GEN_INT_ENTRY h_virt_irq, virt=0, ool=1 2238EXC_REAL_END(h_virt_irq, 0xea0, 0x20) 2239EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20) 2240 GEN_INT_ENTRY h_virt_irq, virt=1, ool=1 2241EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20) 2242EXC_COMMON_BEGIN(h_virt_irq_common) 2243 GEN_COMMON h_virt_irq 2244 addi r3,r1,STACK_FRAME_OVERHEAD 2245 bl do_IRQ 2246 b interrupt_return_hsrr 2247 2248 2249EXC_REAL_NONE(0xec0, 0x20) 2250EXC_VIRT_NONE(0x4ec0, 0x20) 2251EXC_REAL_NONE(0xee0, 0x20) 2252EXC_VIRT_NONE(0x4ee0, 0x20) 2253 2254 2255/* 2256 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU). 2257 * This is an asynchronous interrupt in response to a PMU exception. 2258 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 2259 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()). 2260 * 2261 * Handling: 2262 * This calls into the perf subsystem. 2263 * 2264 * Like the watchdog soft-nmi, it appears an NMI interrupt to Linux, in that it 2265 * runs under local_irq_disable. However it may be soft-masked in 2266 * powerpc-specific code. 2267 * 2268 * If soft masked, the masked handler will note the pending interrupt for 2269 * replay, and clear MSR[EE] in the interrupted context. 2270 */ 2271INT_DEFINE_BEGIN(performance_monitor) 2272 IVEC=0xf00 2273 IMASK=IRQS_PMI_DISABLED 2274#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2275 IKVM_REAL=1 2276#endif 2277INT_DEFINE_END(performance_monitor) 2278 2279EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20) 2280 GEN_INT_ENTRY performance_monitor, virt=0, ool=1 2281EXC_REAL_END(performance_monitor, 0xf00, 0x20) 2282EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20) 2283 GEN_INT_ENTRY performance_monitor, virt=1, ool=1 2284EXC_VIRT_END(performance_monitor, 0x4f00, 0x20) 2285EXC_COMMON_BEGIN(performance_monitor_common) 2286 GEN_COMMON performance_monitor 2287 addi r3,r1,STACK_FRAME_OVERHEAD 2288 bl performance_monitor_exception 2289 b interrupt_return_srr 2290 2291 2292/** 2293 * Interrupt 0xf20 - Vector Unavailable Interrupt. 2294 * This is a synchronous interrupt in response to 2295 * executing a vector (or altivec) instruction with MSR[VEC]=0. 2296 * Similar to FP unavailable. 2297 */ 2298INT_DEFINE_BEGIN(altivec_unavailable) 2299 IVEC=0xf20 2300#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2301 IKVM_REAL=1 2302#endif 2303INT_DEFINE_END(altivec_unavailable) 2304 2305EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20) 2306 GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1 2307EXC_REAL_END(altivec_unavailable, 0xf20, 0x20) 2308EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20) 2309 GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1 2310EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20) 2311EXC_COMMON_BEGIN(altivec_unavailable_common) 2312 GEN_COMMON altivec_unavailable 2313#ifdef CONFIG_ALTIVEC 2314BEGIN_FTR_SECTION 2315 beq 1f 2316#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2317 BEGIN_FTR_SECTION_NESTED(69) 2318 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2319 * transaction), go do TM stuff 2320 */ 2321 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2322 bne- 2f 2323 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2324#endif 2325 bl load_up_altivec 2326 b fast_interrupt_return_srr 2327#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 23282: /* User process was in a transaction */ 2329 addi r3,r1,STACK_FRAME_OVERHEAD 2330 bl altivec_unavailable_tm 2331 b interrupt_return_srr 2332#endif 23331: 2334END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2335#endif 2336 addi r3,r1,STACK_FRAME_OVERHEAD 2337 bl altivec_unavailable_exception 2338 b interrupt_return_srr 2339 2340 2341/** 2342 * Interrupt 0xf40 - VSX Unavailable Interrupt. 2343 * This is a synchronous interrupt in response to 2344 * executing a VSX instruction with MSR[VSX]=0. 2345 * Similar to FP unavailable. 2346 */ 2347INT_DEFINE_BEGIN(vsx_unavailable) 2348 IVEC=0xf40 2349#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2350 IKVM_REAL=1 2351#endif 2352INT_DEFINE_END(vsx_unavailable) 2353 2354EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20) 2355 GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1 2356EXC_REAL_END(vsx_unavailable, 0xf40, 0x20) 2357EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20) 2358 GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1 2359EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20) 2360EXC_COMMON_BEGIN(vsx_unavailable_common) 2361 GEN_COMMON vsx_unavailable 2362#ifdef CONFIG_VSX 2363BEGIN_FTR_SECTION 2364 beq 1f 2365#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2366 BEGIN_FTR_SECTION_NESTED(69) 2367 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2368 * transaction), go do TM stuff 2369 */ 2370 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2371 bne- 2f 2372 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2373#endif 2374 b load_up_vsx 2375#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 23762: /* User process was in a transaction */ 2377 addi r3,r1,STACK_FRAME_OVERHEAD 2378 bl vsx_unavailable_tm 2379 b interrupt_return_srr 2380#endif 23811: 2382END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2383#endif 2384 addi r3,r1,STACK_FRAME_OVERHEAD 2385 bl vsx_unavailable_exception 2386 b interrupt_return_srr 2387 2388 2389/** 2390 * Interrupt 0xf60 - Facility Unavailable Interrupt. 2391 * This is a synchronous interrupt in response to 2392 * executing an instruction without access to the facility that can be 2393 * resolved by the OS (e.g., FSCR, MSR). 2394 * Similar to FP unavailable. 2395 */ 2396INT_DEFINE_BEGIN(facility_unavailable) 2397 IVEC=0xf60 2398#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2399 IKVM_REAL=1 2400#endif 2401INT_DEFINE_END(facility_unavailable) 2402 2403EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20) 2404 GEN_INT_ENTRY facility_unavailable, virt=0, ool=1 2405EXC_REAL_END(facility_unavailable, 0xf60, 0x20) 2406EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20) 2407 GEN_INT_ENTRY facility_unavailable, virt=1, ool=1 2408EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20) 2409EXC_COMMON_BEGIN(facility_unavailable_common) 2410 GEN_COMMON facility_unavailable 2411 addi r3,r1,STACK_FRAME_OVERHEAD 2412 bl facility_unavailable_exception 2413 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2414 b interrupt_return_srr 2415 2416 2417/** 2418 * Interrupt 0xf60 - Hypervisor Facility Unavailable Interrupt. 2419 * This is a synchronous interrupt in response to 2420 * executing an instruction without access to the facility that can only 2421 * be resolved in HV mode (e.g., HFSCR). 2422 * Similar to FP unavailable. 2423 */ 2424INT_DEFINE_BEGIN(h_facility_unavailable) 2425 IVEC=0xf80 2426 IHSRR=1 2427 IKVM_REAL=1 2428 IKVM_VIRT=1 2429INT_DEFINE_END(h_facility_unavailable) 2430 2431EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20) 2432 GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1 2433EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20) 2434EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20) 2435 GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1 2436EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20) 2437EXC_COMMON_BEGIN(h_facility_unavailable_common) 2438 GEN_COMMON h_facility_unavailable 2439 addi r3,r1,STACK_FRAME_OVERHEAD 2440 bl facility_unavailable_exception 2441 REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */ 2442 b interrupt_return_hsrr 2443 2444 2445EXC_REAL_NONE(0xfa0, 0x20) 2446EXC_VIRT_NONE(0x4fa0, 0x20) 2447EXC_REAL_NONE(0xfc0, 0x20) 2448EXC_VIRT_NONE(0x4fc0, 0x20) 2449EXC_REAL_NONE(0xfe0, 0x20) 2450EXC_VIRT_NONE(0x4fe0, 0x20) 2451 2452EXC_REAL_NONE(0x1000, 0x100) 2453EXC_VIRT_NONE(0x5000, 0x100) 2454EXC_REAL_NONE(0x1100, 0x100) 2455EXC_VIRT_NONE(0x5100, 0x100) 2456 2457#ifdef CONFIG_CBE_RAS 2458INT_DEFINE_BEGIN(cbe_system_error) 2459 IVEC=0x1200 2460 IHSRR=1 2461INT_DEFINE_END(cbe_system_error) 2462 2463EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100) 2464 GEN_INT_ENTRY cbe_system_error, virt=0 2465EXC_REAL_END(cbe_system_error, 0x1200, 0x100) 2466EXC_VIRT_NONE(0x5200, 0x100) 2467EXC_COMMON_BEGIN(cbe_system_error_common) 2468 GEN_COMMON cbe_system_error 2469 addi r3,r1,STACK_FRAME_OVERHEAD 2470 bl cbe_system_error_exception 2471 b interrupt_return_hsrr 2472 2473#else /* CONFIG_CBE_RAS */ 2474EXC_REAL_NONE(0x1200, 0x100) 2475EXC_VIRT_NONE(0x5200, 0x100) 2476#endif 2477 2478/** 2479 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt. 2480 * This has been removed from the ISA before 2.01, which is the earliest 2481 * 64-bit BookS ISA supported, however the G5 / 970 implements this 2482 * interrupt with a non-architected feature available through the support 2483 * processor interface. 2484 */ 2485INT_DEFINE_BEGIN(instruction_breakpoint) 2486 IVEC=0x1300 2487#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2488 IKVM_REAL=1 2489#endif 2490INT_DEFINE_END(instruction_breakpoint) 2491 2492EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100) 2493 GEN_INT_ENTRY instruction_breakpoint, virt=0 2494EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100) 2495EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100) 2496 GEN_INT_ENTRY instruction_breakpoint, virt=1 2497EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100) 2498EXC_COMMON_BEGIN(instruction_breakpoint_common) 2499 GEN_COMMON instruction_breakpoint 2500 addi r3,r1,STACK_FRAME_OVERHEAD 2501 bl instruction_breakpoint_exception 2502 b interrupt_return_srr 2503 2504 2505EXC_REAL_NONE(0x1400, 0x100) 2506EXC_VIRT_NONE(0x5400, 0x100) 2507 2508/** 2509 * Interrupt 0x1500 - Soft Patch Interrupt 2510 * 2511 * Handling: 2512 * This is an implementation specific interrupt which can be used for a 2513 * range of exceptions. 2514 * 2515 * This interrupt handler is unique in that it runs the denormal assist 2516 * code even for guests (and even in guest context) without going to KVM, 2517 * for speed. POWER9 does not raise denorm exceptions, so this special case 2518 * could be phased out in future to reduce special cases. 2519 */ 2520INT_DEFINE_BEGIN(denorm_exception) 2521 IVEC=0x1500 2522 IHSRR=1 2523 IBRANCH_TO_COMMON=0 2524 IKVM_REAL=1 2525INT_DEFINE_END(denorm_exception) 2526 2527EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100) 2528 GEN_INT_ENTRY denorm_exception, virt=0 2529#ifdef CONFIG_PPC_DENORMALISATION 2530 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2531 bne+ denorm_assist 2532#endif 2533 GEN_BRANCH_TO_COMMON denorm_exception, virt=0 2534EXC_REAL_END(denorm_exception, 0x1500, 0x100) 2535#ifdef CONFIG_PPC_DENORMALISATION 2536EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100) 2537 GEN_INT_ENTRY denorm_exception, virt=1 2538 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ 2539 bne+ denorm_assist 2540 GEN_BRANCH_TO_COMMON denorm_exception, virt=1 2541EXC_VIRT_END(denorm_exception, 0x5500, 0x100) 2542#else 2543EXC_VIRT_NONE(0x5500, 0x100) 2544#endif 2545 2546#ifdef CONFIG_PPC_DENORMALISATION 2547TRAMP_REAL_BEGIN(denorm_assist) 2548BEGIN_FTR_SECTION 2549/* 2550 * To denormalise we need to move a copy of the register to itself. 2551 * For POWER6 do that here for all FP regs. 2552 */ 2553 mfmsr r10 2554 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1) 2555 xori r10,r10,(MSR_FE0|MSR_FE1) 2556 mtmsrd r10 2557 sync 2558 2559 .Lreg=0 2560 .rept 32 2561 fmr .Lreg,.Lreg 2562 .Lreg=.Lreg+1 2563 .endr 2564 2565FTR_SECTION_ELSE 2566/* 2567 * To denormalise we need to move a copy of the register to itself. 2568 * For POWER7 do that here for the first 32 VSX registers only. 2569 */ 2570 mfmsr r10 2571 oris r10,r10,MSR_VSX@h 2572 mtmsrd r10 2573 sync 2574 2575 .Lreg=0 2576 .rept 32 2577 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2578 .Lreg=.Lreg+1 2579 .endr 2580 2581ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) 2582 2583BEGIN_FTR_SECTION 2584 b denorm_done 2585END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 2586/* 2587 * To denormalise we need to move a copy of the register to itself. 2588 * For POWER8 we need to do that for all 64 VSX registers 2589 */ 2590 .Lreg=32 2591 .rept 32 2592 XVCPSGNDP(.Lreg,.Lreg,.Lreg) 2593 .Lreg=.Lreg+1 2594 .endr 2595 2596denorm_done: 2597 mfspr r11,SPRN_HSRR0 2598 subi r11,r11,4 2599 mtspr SPRN_HSRR0,r11 2600 mtcrf 0x80,r9 2601 ld r9,PACA_EXGEN+EX_R9(r13) 2602BEGIN_FTR_SECTION 2603 ld r10,PACA_EXGEN+EX_PPR(r13) 2604 mtspr SPRN_PPR,r10 2605END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 2606BEGIN_FTR_SECTION 2607 ld r10,PACA_EXGEN+EX_CFAR(r13) 2608 mtspr SPRN_CFAR,r10 2609END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 2610 li r10,0 2611 stb r10,PACAHSRR_VALID(r13) 2612 ld r10,PACA_EXGEN+EX_R10(r13) 2613 ld r11,PACA_EXGEN+EX_R11(r13) 2614 ld r12,PACA_EXGEN+EX_R12(r13) 2615 ld r13,PACA_EXGEN+EX_R13(r13) 2616 HRFI_TO_UNKNOWN 2617 b . 2618#endif 2619 2620EXC_COMMON_BEGIN(denorm_exception_common) 2621 GEN_COMMON denorm_exception 2622 addi r3,r1,STACK_FRAME_OVERHEAD 2623 bl unknown_exception 2624 b interrupt_return_hsrr 2625 2626 2627#ifdef CONFIG_CBE_RAS 2628INT_DEFINE_BEGIN(cbe_maintenance) 2629 IVEC=0x1600 2630 IHSRR=1 2631INT_DEFINE_END(cbe_maintenance) 2632 2633EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) 2634 GEN_INT_ENTRY cbe_maintenance, virt=0 2635EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) 2636EXC_VIRT_NONE(0x5600, 0x100) 2637EXC_COMMON_BEGIN(cbe_maintenance_common) 2638 GEN_COMMON cbe_maintenance 2639 addi r3,r1,STACK_FRAME_OVERHEAD 2640 bl cbe_maintenance_exception 2641 b interrupt_return_hsrr 2642 2643#else /* CONFIG_CBE_RAS */ 2644EXC_REAL_NONE(0x1600, 0x100) 2645EXC_VIRT_NONE(0x5600, 0x100) 2646#endif 2647 2648 2649INT_DEFINE_BEGIN(altivec_assist) 2650 IVEC=0x1700 2651#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2652 IKVM_REAL=1 2653#endif 2654INT_DEFINE_END(altivec_assist) 2655 2656EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100) 2657 GEN_INT_ENTRY altivec_assist, virt=0 2658EXC_REAL_END(altivec_assist, 0x1700, 0x100) 2659EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) 2660 GEN_INT_ENTRY altivec_assist, virt=1 2661EXC_VIRT_END(altivec_assist, 0x5700, 0x100) 2662EXC_COMMON_BEGIN(altivec_assist_common) 2663 GEN_COMMON altivec_assist 2664 addi r3,r1,STACK_FRAME_OVERHEAD 2665#ifdef CONFIG_ALTIVEC 2666 bl altivec_assist_exception 2667 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2668#else 2669 bl unknown_exception 2670#endif 2671 b interrupt_return_srr 2672 2673 2674#ifdef CONFIG_CBE_RAS 2675INT_DEFINE_BEGIN(cbe_thermal) 2676 IVEC=0x1800 2677 IHSRR=1 2678INT_DEFINE_END(cbe_thermal) 2679 2680EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) 2681 GEN_INT_ENTRY cbe_thermal, virt=0 2682EXC_REAL_END(cbe_thermal, 0x1800, 0x100) 2683EXC_VIRT_NONE(0x5800, 0x100) 2684EXC_COMMON_BEGIN(cbe_thermal_common) 2685 GEN_COMMON cbe_thermal 2686 addi r3,r1,STACK_FRAME_OVERHEAD 2687 bl cbe_thermal_exception 2688 b interrupt_return_hsrr 2689 2690#else /* CONFIG_CBE_RAS */ 2691EXC_REAL_NONE(0x1800, 0x100) 2692EXC_VIRT_NONE(0x5800, 0x100) 2693#endif 2694 2695 2696#ifdef CONFIG_PPC_WATCHDOG 2697 2698INT_DEFINE_BEGIN(soft_nmi) 2699 IVEC=0x900 2700 ISTACK=0 2701INT_DEFINE_END(soft_nmi) 2702 2703/* 2704 * Branch to soft_nmi_interrupt using the emergency stack. The emergency 2705 * stack is one that is usable by maskable interrupts so long as MSR_EE 2706 * remains off. It is used for recovery when something has corrupted the 2707 * normal kernel stack, for example. The "soft NMI" must not use the process 2708 * stack because we want irq disabled sections to avoid touching the stack 2709 * at all (other than PMU interrupts), so use the emergency stack for this, 2710 * and run it entirely with interrupts hard disabled. 2711 */ 2712EXC_COMMON_BEGIN(soft_nmi_common) 2713 mr r10,r1 2714 ld r1,PACAEMERGSP(r13) 2715 subi r1,r1,INT_FRAME_SIZE 2716 __GEN_COMMON_BODY soft_nmi 2717 2718 addi r3,r1,STACK_FRAME_OVERHEAD 2719 bl soft_nmi_interrupt 2720 2721 /* Clear MSR_RI before setting SRR0 and SRR1. */ 2722 li r9,0 2723 mtmsrd r9,1 2724 2725 kuap_kernel_restore r9, r10 2726 2727 EXCEPTION_RESTORE_REGS hsrr=0 2728 RFI_TO_KERNEL 2729 2730#endif /* CONFIG_PPC_WATCHDOG */ 2731 2732/* 2733 * An interrupt came in while soft-disabled. We set paca->irq_happened, then: 2734 * - If it was a decrementer interrupt, we bump the dec to max and and return. 2735 * - If it was a doorbell we return immediately since doorbells are edge 2736 * triggered and won't automatically refire. 2737 * - If it was a HMI we return immediately since we handled it in realmode 2738 * and it won't refire. 2739 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. 2740 * This is called with r10 containing the value to OR to the paca field. 2741 */ 2742.macro MASKED_INTERRUPT hsrr=0 2743 .if \hsrr 2744masked_Hinterrupt: 2745 .else 2746masked_interrupt: 2747 .endif 2748 stw r9,PACA_EXGEN+EX_CCR(r13) 2749 lbz r9,PACAIRQHAPPENED(r13) 2750 or r9,r9,r10 2751 stb r9,PACAIRQHAPPENED(r13) 2752 2753 .if ! \hsrr 2754 cmpwi r10,PACA_IRQ_DEC 2755 bne 1f 2756 LOAD_REG_IMMEDIATE(r9, 0x7fffffff) 2757 mtspr SPRN_DEC,r9 2758#ifdef CONFIG_PPC_WATCHDOG 2759 lwz r9,PACA_EXGEN+EX_CCR(r13) 2760 b soft_nmi_common 2761#else 2762 b 2f 2763#endif 2764 .endif 2765 27661: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK 2767 beq 2f 2768 xori r12,r12,MSR_EE /* clear MSR_EE */ 2769 .if \hsrr 2770 mtspr SPRN_HSRR1,r12 2771 .else 2772 mtspr SPRN_SRR1,r12 2773 .endif 2774 ori r9,r9,PACA_IRQ_HARD_DIS 2775 stb r9,PACAIRQHAPPENED(r13) 27762: /* done */ 2777 li r9,0 2778 .if \hsrr 2779 stb r9,PACAHSRR_VALID(r13) 2780 .else 2781 stb r9,PACASRR_VALID(r13) 2782 .endif 2783 2784 SEARCH_RESTART_TABLE 2785 cmpdi r12,0 2786 beq 3f 2787 .if \hsrr 2788 mtspr SPRN_HSRR0,r12 2789 .else 2790 mtspr SPRN_SRR0,r12 2791 .endif 27923: 2793 2794 ld r9,PACA_EXGEN+EX_CTR(r13) 2795 mtctr r9 2796 lwz r9,PACA_EXGEN+EX_CCR(r13) 2797 mtcrf 0x80,r9 2798 std r1,PACAR1(r13) 2799 ld r9,PACA_EXGEN+EX_R9(r13) 2800 ld r10,PACA_EXGEN+EX_R10(r13) 2801 ld r11,PACA_EXGEN+EX_R11(r13) 2802 ld r12,PACA_EXGEN+EX_R12(r13) 2803 ld r13,PACA_EXGEN+EX_R13(r13) 2804 /* May return to masked low address where r13 is not set up */ 2805 .if \hsrr 2806 HRFI_TO_KERNEL 2807 .else 2808 RFI_TO_KERNEL 2809 .endif 2810 b . 2811.endm 2812 2813TRAMP_REAL_BEGIN(stf_barrier_fallback) 2814 std r9,PACA_EXRFI+EX_R9(r13) 2815 std r10,PACA_EXRFI+EX_R10(r13) 2816 sync 2817 ld r9,PACA_EXRFI+EX_R9(r13) 2818 ld r10,PACA_EXRFI+EX_R10(r13) 2819 ori 31,31,0 2820 .rept 14 2821 b 1f 28221: 2823 .endr 2824 blr 2825 2826/* Clobbers r10, r11, ctr */ 2827.macro L1D_DISPLACEMENT_FLUSH 2828 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2829 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2830 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 2831 mtctr r11 2832 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 2833 2834 /* order ld/st prior to dcbt stop all streams with flushing */ 2835 sync 2836 2837 /* 2838 * The load addresses are at staggered offsets within cachelines, 2839 * which suits some pipelines better (on others it should not 2840 * hurt). 2841 */ 28421: 2843 ld r11,(0x80 + 8)*0(r10) 2844 ld r11,(0x80 + 8)*1(r10) 2845 ld r11,(0x80 + 8)*2(r10) 2846 ld r11,(0x80 + 8)*3(r10) 2847 ld r11,(0x80 + 8)*4(r10) 2848 ld r11,(0x80 + 8)*5(r10) 2849 ld r11,(0x80 + 8)*6(r10) 2850 ld r11,(0x80 + 8)*7(r10) 2851 addi r10,r10,0x80*8 2852 bdnz 1b 2853.endm 2854 2855TRAMP_REAL_BEGIN(entry_flush_fallback) 2856 std r9,PACA_EXRFI+EX_R9(r13) 2857 std r10,PACA_EXRFI+EX_R10(r13) 2858 std r11,PACA_EXRFI+EX_R11(r13) 2859 mfctr r9 2860 L1D_DISPLACEMENT_FLUSH 2861 mtctr r9 2862 ld r9,PACA_EXRFI+EX_R9(r13) 2863 ld r10,PACA_EXRFI+EX_R10(r13) 2864 ld r11,PACA_EXRFI+EX_R11(r13) 2865 blr 2866 2867/* 2868 * The SCV entry flush happens with interrupts enabled, so it must disable 2869 * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10 2870 * (containing LR) does not need to be preserved here because scv entry 2871 * puts 0 in the pt_regs, CTR can be clobbered for the same reason. 2872 */ 2873TRAMP_REAL_BEGIN(scv_entry_flush_fallback) 2874 li r10,0 2875 mtmsrd r10,1 2876 lbz r10,PACAIRQHAPPENED(r13) 2877 ori r10,r10,PACA_IRQ_HARD_DIS 2878 stb r10,PACAIRQHAPPENED(r13) 2879 std r11,PACA_EXRFI+EX_R11(r13) 2880 L1D_DISPLACEMENT_FLUSH 2881 ld r11,PACA_EXRFI+EX_R11(r13) 2882 li r10,MSR_RI 2883 mtmsrd r10,1 2884 blr 2885 2886TRAMP_REAL_BEGIN(rfi_flush_fallback) 2887 SET_SCRATCH0(r13); 2888 GET_PACA(r13); 2889 std r1,PACA_EXRFI+EX_R12(r13) 2890 ld r1,PACAKSAVE(r13) 2891 std r9,PACA_EXRFI+EX_R9(r13) 2892 std r10,PACA_EXRFI+EX_R10(r13) 2893 std r11,PACA_EXRFI+EX_R11(r13) 2894 mfctr r9 2895 L1D_DISPLACEMENT_FLUSH 2896 mtctr r9 2897 ld r9,PACA_EXRFI+EX_R9(r13) 2898 ld r10,PACA_EXRFI+EX_R10(r13) 2899 ld r11,PACA_EXRFI+EX_R11(r13) 2900 ld r1,PACA_EXRFI+EX_R12(r13) 2901 GET_SCRATCH0(r13); 2902 rfid 2903 2904TRAMP_REAL_BEGIN(hrfi_flush_fallback) 2905 SET_SCRATCH0(r13); 2906 GET_PACA(r13); 2907 std r1,PACA_EXRFI+EX_R12(r13) 2908 ld r1,PACAKSAVE(r13) 2909 std r9,PACA_EXRFI+EX_R9(r13) 2910 std r10,PACA_EXRFI+EX_R10(r13) 2911 std r11,PACA_EXRFI+EX_R11(r13) 2912 mfctr r9 2913 L1D_DISPLACEMENT_FLUSH 2914 mtctr r9 2915 ld r9,PACA_EXRFI+EX_R9(r13) 2916 ld r10,PACA_EXRFI+EX_R10(r13) 2917 ld r11,PACA_EXRFI+EX_R11(r13) 2918 ld r1,PACA_EXRFI+EX_R12(r13) 2919 GET_SCRATCH0(r13); 2920 hrfid 2921 2922TRAMP_REAL_BEGIN(rfscv_flush_fallback) 2923 /* system call volatile */ 2924 mr r7,r13 2925 GET_PACA(r13); 2926 mr r8,r1 2927 ld r1,PACAKSAVE(r13) 2928 mfctr r9 2929 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2930 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2931 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 2932 mtctr r11 2933 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 2934 2935 /* order ld/st prior to dcbt stop all streams with flushing */ 2936 sync 2937 2938 /* 2939 * The load adresses are at staggered offsets within cachelines, 2940 * which suits some pipelines better (on others it should not 2941 * hurt). 2942 */ 29431: 2944 ld r11,(0x80 + 8)*0(r10) 2945 ld r11,(0x80 + 8)*1(r10) 2946 ld r11,(0x80 + 8)*2(r10) 2947 ld r11,(0x80 + 8)*3(r10) 2948 ld r11,(0x80 + 8)*4(r10) 2949 ld r11,(0x80 + 8)*5(r10) 2950 ld r11,(0x80 + 8)*6(r10) 2951 ld r11,(0x80 + 8)*7(r10) 2952 addi r10,r10,0x80*8 2953 bdnz 1b 2954 2955 mtctr r9 2956 li r9,0 2957 li r10,0 2958 li r11,0 2959 mr r1,r8 2960 mr r13,r7 2961 RFSCV 2962 2963USE_TEXT_SECTION() 2964 2965#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 2966kvm_interrupt: 2967 /* 2968 * The conditional branch in KVMTEST can't reach all the way, 2969 * make a stub. 2970 */ 2971 b kvmppc_interrupt 2972#endif 2973 2974_GLOBAL(do_uaccess_flush) 2975 UACCESS_FLUSH_FIXUP_SECTION 2976 nop 2977 nop 2978 nop 2979 blr 2980 L1D_DISPLACEMENT_FLUSH 2981 blr 2982_ASM_NOKPROBE_SYMBOL(do_uaccess_flush) 2983EXPORT_SYMBOL(do_uaccess_flush) 2984 2985 2986MASKED_INTERRUPT 2987MASKED_INTERRUPT hsrr=1 2988 2989 /* 2990 * Relocation-on interrupts: A subset of the interrupts can be delivered 2991 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering 2992 * it. Addresses are the same as the original interrupt addresses, but 2993 * offset by 0xc000000000004000. 2994 * It's impossible to receive interrupts below 0x300 via this mechanism. 2995 * KVM: None of these traps are from the guest ; anything that escalated 2996 * to HV=1 from HV=0 is delivered via real mode handlers. 2997 */ 2998 2999 /* 3000 * This uses the standard macro, since the original 0x300 vector 3001 * only has extra guff for STAB-based processors -- which never 3002 * come here. 3003 */ 3004 3005USE_FIXED_SECTION(virt_trampolines) 3006 /* 3007 * All code below __end_soft_masked is treated as soft-masked. If 3008 * any code runs here with MSR[EE]=1, it must then cope with pending 3009 * soft interrupt being raised (i.e., by ensuring it is replayed). 3010 * 3011 * The __end_interrupts marker must be past the out-of-line (OOL) 3012 * handlers, so that they are copied to real address 0x100 when running 3013 * a relocatable kernel. This ensures they can be reached from the short 3014 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch 3015 * directly, without using LOAD_HANDLER(). 3016 */ 3017 .align 7 3018 .globl __end_interrupts 3019__end_interrupts: 3020DEFINE_FIXED_SYMBOL(__end_interrupts) 3021 3022CLOSE_FIXED_SECTION(real_vectors); 3023CLOSE_FIXED_SECTION(real_trampolines); 3024CLOSE_FIXED_SECTION(virt_vectors); 3025CLOSE_FIXED_SECTION(virt_trampolines); 3026 3027USE_TEXT_SECTION() 3028 3029/* MSR[RI] should be clear because this uses SRR[01] */ 3030enable_machine_check: 3031 mflr r0 3032 bcl 20,31,$+4 30330: mflr r3 3034 addi r3,r3,(1f - 0b) 3035 mtspr SPRN_SRR0,r3 3036 mfmsr r3 3037 ori r3,r3,MSR_ME 3038 mtspr SPRN_SRR1,r3 3039 RFI_TO_KERNEL 30401: mtlr r0 3041 blr 3042 3043/* MSR[RI] should be clear because this uses SRR[01] */ 3044disable_machine_check: 3045 mflr r0 3046 bcl 20,31,$+4 30470: mflr r3 3048 addi r3,r3,(1f - 0b) 3049 mtspr SPRN_SRR0,r3 3050 mfmsr r3 3051 li r4,MSR_ME 3052 andc r3,r3,r4 3053 mtspr SPRN_SRR1,r3 3054 RFI_TO_KERNEL 30551: mtlr r0 3056 blr 3057