#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

	.align 7

.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
	clrrdi	r11,r11,2
	clrrdi	r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
	clrrdi	r11,r11,2
	clrrdi	r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm

#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off() which means
	 * interrupts could already have been blocked before trace_hardirqs_off,
	 * but this is the best we can do.
	 */

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
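	/*
	 * Break any larx reservation still held here so it cannot pair
	 * with a later stcx. in userspace; CPUs that check the stcx.
	 * address (CPU_FTR_STCX_CHECKS_ADDRESS) presumably do not need
	 * the store, so it is patched out for them.
	 */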
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/* Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done. */
	ld	r0,GPR0(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	ld	r0,GPR0(r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

#endif /* CONFIG_PPC_BOOK3S */

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10
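	/*
	 * Entry for sc. By the time we get here the vector code has set up
	 * the state that the stores below rely on: r0 = syscall number,
	 * r3-r8 = arguments, r9 = original r13, r11 = SRR0 (NIP),
	 * r12 = SRR1 (MSR), r13 = PACA, and r1 still points at the user
	 * stack. A pt_regs frame is built at PACAKSAVE before calling
	 * into C.
	 */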
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif

	/*
	 * We always enter kernel from userspace with irq soft-mask enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1		/* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */
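	/*
	 * Slow path: syscall_exit_prepare returned non-zero, so the frame
	 * may have been modified (e.g. by ptrace or sigreturn) and the
	 * full register set, including NVGPRs, CTR and XER, is reloaded.
	 */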
.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0	/* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.	/* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif

.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
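	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */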
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_GPRS(7, 13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bne	1f
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
1:

.Lfast_kernel_interrupt_return_\srr\():
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
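	/*
	 * r3 (tested into cr1 above) is non-zero when the exit code saw a
	 * pending stack store (TIF_EMULATE_STACK_STORE, e.g. set when the
	 * alignment handler emulated a store with update to r1 via
	 * emulate_loadstore); the store component is finished below once
	 * the frame contents have been consumed.
	 */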
1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit