// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/sched/debug.h> /* for show_regs */

#include <asm/asm-prototypes.h>
#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>

#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
unsigned long global_dbcr0[NR_CPUS];
#endif

typedef long (*syscall_fn)(long, long, long, long, long, long);

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
{
	return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
	return true;
}
#endif

/*
 * local irqs must be disabled. Returns false if the caller must re-enable
 * them, check for new work, and try again.
 *
 * This should be called with local irqs disabled, but if they were previously
 * enabled when the interrupt handler returns (indicating a process-context /
 * synchronous interrupt) then irqs_enabled should be true.
 *
 * If restartable is true then EE/RI can be left on because interrupts are
 * handled with a restart sequence.
 */
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

	if (exit_must_hard_disable() || !restartable)
		__hard_EE_RI_disable();

#ifdef CONFIG_PPC64
	/* This pattern matches prep_irq_for_idle */
	if (unlikely(lazy_irq_pending_nocheck())) {
		if (exit_must_hard_disable() || !restartable) {
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			__hard_RI_enable();
		}
		trace_hardirqs_off();

		return false;
	}
#endif
	return true;
}

/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
				   long r6, long r7, long r8,
				   unsigned long r0, struct pt_regs *regs)
{
	syscall_fn f;

	kuap_lock();

	regs->orig_gpr3 = r3;

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);

	trace_hardirqs_off(); /* finish reconciling */

	CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
	user_exit_irqoff();

	BUG_ON(regs_is_unrecoverable(regs));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(arch_irq_disabled_regs(regs));

#ifdef CONFIG_PPC_PKEY
	if (mmu_has_feature(MMU_FTR_PKEY)) {
		unsigned long amr, iamr;
		bool flush_needed = false;
		/*
		 * When entering from userspace we mostly have the AMR/IAMR
		 * different from kernel default values. Hence don't compare.
		 */
		amr = mfspr(SPRN_AMR);
		iamr = mfspr(SPRN_IAMR);
		regs->amr = amr;
		regs->iamr = iamr;
		if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
			mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
			flush_needed = true;
		}
		if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
			mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
			flush_needed = true;
		}
		if (flush_needed)
			isync();
	} else
#endif
		kuap_assert_locked();

	booke_restore_dbcr0();

	account_cpu_user_entry();

	account_stolen_time();

	/*
	 * This is not required for the syscall exit path, but makes the
	 * stack frame look nicer. If this was initialised in the first stack
	 * frame, or if the unwinder was taught the first stack frame always
	 * returns to user with IRQS_ENABLED, this store could be avoided!
	 */
	irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);

	/*
	 * If the system call is made with TM active, set _TIF_RESTOREALL to
	 * prevent RFSCV being used to return to userspace, because the POWER9
	 * TM implementation has problems with this instruction returning to
	 * transactional state. Final register values are not relevant because
	 * the transaction will be aborted upon return anyway. Or in the case
	 * of an unsupported_scv SIGILL fault, the return state does not much
	 * matter because it's an edge case.
	 */
	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
	    unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
		set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);

	/*
	 * If the system call was made with a transaction active, doom it and
	 * return without performing the system call. Unless it was an
	 * unsupported scv vector, in which case it's treated like an illegal
	 * instruction.
	 */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
	    !trap_is_unsupported_scv(regs)) {
		/* Enable TM in the kernel, and disable EE (for scv) */
		hard_irq_disable();
		mtmsr(mfmsr() | MSR_TM);

		/* tabort, this dooms the transaction, nothing else */
		asm volatile(".long 0x7c00071d | ((%0) << 16)"
			     :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));

		/*
		 * Userspace will never see the return value. Execution will
		 * resume after the tbegin. of the aborted transaction with the
		 * checkpointed register state. A context switch could occur
		 * or a signal could be delivered to the process before resuming
		 * the doomed transaction context, but that should all be
		 * handled as expected.
		 */
		return -ENOSYS;
	}
#endif // CONFIG_PPC_TRANSACTIONAL_MEM

	local_irq_enable();

	if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
		if (unlikely(trap_is_unsupported_scv(regs))) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		/*
		 * We use the return value of do_syscall_trace_enter() as the
		 * syscall number. If the syscall was rejected for any reason,
		 * do_syscall_trace_enter() returns an invalid syscall number,
		 * the test against NR_syscalls below fails, and the value to
		 * return is taken from regs->gpr[3].
		 */
		r0 = do_syscall_trace_enter(regs);
		if (unlikely(r0 >= NR_syscalls))
			return regs->gpr[3];
		r3 = regs->gpr[3];
		r4 = regs->gpr[4];
		r5 = regs->gpr[5];
		r6 = regs->gpr[6];
		r7 = regs->gpr[7];
		r8 = regs->gpr[8];

	} else if (unlikely(r0 >= NR_syscalls)) {
		if (unlikely(trap_is_unsupported_scv(regs))) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		return -ENOSYS;
	}

	/* May be faster to do array_index_nospec? */
	barrier_nospec();

	if (unlikely(is_compat_task())) {
		f = (void *)compat_sys_call_table[r0];

		r3 &= 0x00000000ffffffffULL;
		r4 &= 0x00000000ffffffffULL;
		r5 &= 0x00000000ffffffffULL;
		r6 &= 0x00000000ffffffffULL;
		r7 &= 0x00000000ffffffffULL;
		r8 &= 0x00000000ffffffffULL;

	} else {
		f = (void *)sys_call_table[r0];
	}

	return f(r3, r4, r5, r6, r7, r8);
}

static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	if (likely(!(dbcr0 & DBCR0_IDM)))
		return;

	mtmsr(mfmsr() & ~MSR_DE);
	if (IS_ENABLED(CONFIG_PPC32)) {
		isync();
		global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
	}
	mtspr(SPRN_DBCR0, dbcr0);
	mtspr(SPRN_DBSR, -1);
#endif
}

/*
 * Sanity check that the SRR (or HSRR) contents the interrupt return will use
 * still match regs->nip and regs->msr. Warn once and clear the paca valid
 * flag if they appear to have been clobbered, so the exit path reloads them.
 */
static void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long trap, srr0, srr1;
	static bool warned;
	u8 *validp;
	char *h;

	if (trap_is_scv(regs))
		return;

	trap = TRAP(regs);
	// EE in HV mode sets HSRRs like 0xea0
	if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
		trap = 0xea0;

	switch (trap) {
	case 0x980:
	case INTERRUPT_H_DATA_STORAGE:
	case 0xe20:
	case 0xe40:
	case INTERRUPT_HMI:
	case 0xe80:
	case 0xea0:
	case INTERRUPT_H_FAC_UNAVAIL:
	case 0x1200:
	case 0x1500:
	case 0x1600:
	case 0x1800:
		validp = &local_paca->hsrr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_HSRR0);
		srr1 = mfspr(SPRN_HSRR1);
		h = "H";

		break;
	default:
		validp = &local_paca->srr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_SRR0);
		srr1 = mfspr(SPRN_SRR1);
		h = "";
		break;
	}

	if (srr0 == regs->nip && srr1 == regs->msr)
		return;

	/*
	 * An NMI / soft-NMI interrupt may have come in after we found
	 * srr_valid and before the SRRs are loaded here, clobbering the SRRs
	 * and clearing srr_valid. We then load the SRRs here, and the test
	 * above finds they don't match.
	 *
	 * Test validity again after that, to catch such false positives.
	 *
	 * This test in general will have some window for false negatives
	 * and may not catch and fix all such cases if an NMI comes in
	 * later and clobbers SRRs without clearing srr_valid, but hopefully
	 * such things will get caught most of the time, statistically
	 * enough to be able to get a warning out.
	 */
	barrier();

	if (!*validp)
		return;

	if (!warned) {
		warned = true;
		printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
		printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
		show_regs(regs);
	}

	*validp = 0; /* fixup */
#endif
}

static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
	unsigned long ti_flags;

again:
	ti_flags = read_thread_flags();
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * SIGPENDING must restore signal handler function
			 * argument GPRs, and some non-volatiles (e.g., r1).
			 * Restore all for now. This could be made lighter.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = read_thread_flags();
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
		    unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * If the userspace MSR has all available FP bits set,
			 * then they are live and there is no need to restore
			 * them. If not, the regs were given up and
			 * restore_math may decide to restore them (to avoid
			 * taking an FP fault).
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	check_return_regs_valid(regs);

	user_enter_irqoff();
	if (!prep_irq_for_enabled_exit(true)) {
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	booke_load_dbcr0();

	account_cpu_user_exit();

	/* Restore user access locks last */
	kuap_user_restore(regs);

	return ret;
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPR registers and CTR and XER
 * from the interrupt frame.
 *
 * The function graph tracer cannot trace the return side of this function,
 * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
					   struct pt_regs *regs,
					   long scv)
{
	unsigned long ti_flags;
	unsigned long ret = 0;
	bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;

	CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap_assert_locked();

	regs->result = r3;

	/* Check whether the syscall is issued inside a restartable sequence */
	rseq_syscall(regs);

	ti_flags = read_thread_flags();

	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
			r3 = -r3;
			regs->ccr |= 0x10000000; /* Set SO bit in CR */
		}
	}

	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
		if (ti_flags & _TIF_RESTOREALL)
			ret = _TIF_RESTOREALL;
		else
			regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
	} else {
		regs->gpr[3] = r3;
	}

	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
		do_syscall_trace_leave(regs);
		ret |= _TIF_RESTOREALL;
	}

	local_irq_disable();
	ret = interrupt_exit_user_prepare_main(ret, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

#ifdef CONFIG_PPC64
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
	/*
	 * This is called when detecting a soft-pending interrupt as well as
	 * an alternate-return interrupt. So we can't just have the alternate
	 * return path clear SRR1[EE] and set PACA_IRQ_HARD_DIS (unless
	 * the soft-pending case were to fix things up as well). RI might be
	 * disabled, in which case it gets re-enabled by __hard_irq_disable().
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);

	return regs->exit_result;
}
#endif

notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
	unsigned long ret;

	BUG_ON(regs_is_unrecoverable(regs));
	BUG_ON(arch_irq_disabled_regs(regs));
	CT_WARN_ON(ct_state() == CONTEXT_USER);

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * AMR can only have been unlocked if we interrupted the kernel.
	 */
	kuap_assert_locked();

	local_irq_disable();

	ret = interrupt_exit_user_prepare_main(0, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

void preempt_schedule_irq(void);

notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
	unsigned long flags;
	unsigned long ret = 0;
	unsigned long kuap;
	bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;

	if (regs_is_unrecoverable(regs))
		unrecoverable_exception(regs);
	/*
	 * CT_WARN_ON comes here via program_check_exception,
	 * so avoid recursion.
	 */
	if (TRAP(regs) != INTERRUPT_PROGRAM)
		CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap = kuap_get_and_assert_locked();

	local_irq_save(flags);

	if (!arch_irq_disabled_regs(regs)) {
		/*
		 * Returning to a kernel context with local irqs enabled.
		 */
		WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
		if (IS_ENABLED(CONFIG_PREEMPT)) {
			/* Return to preemptible kernel context */
			if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
				if (preempt_count() == 0)
					preempt_schedule_irq();
			}
		}

		check_return_regs_valid(regs);

		/*
		 * Stack store exit can't be restarted because the interrupt
		 * stack frame might have been clobbered.
		 */
		if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
			/*
			 * Replay pending soft-masked interrupts now. Don't
			 * just local_irq_enable(); local_irq_disable(); because
			 * if we are returning from an asynchronous interrupt
			 * here, another one might hit after irqs are enabled,
			 * and it would exit via this same path allowing
			 * another to fire, and so on unbounded.
			 */
			hard_irq_disable();
			replay_soft_interrupts();
			/* Took an interrupt, may have more exit work to do. */
			goto again;
		}
#ifdef CONFIG_PPC64
		/*
		 * An interrupt may clear MSR[EE] and set this concurrently,
		 * but it will be marked pending and the exit will be retried.
		 * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
		 * clear, until interrupt_exit_kernel_restart() calls
		 * hard_irq_disable(), which will set HARD_DIS again.
		 */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	} else {
		check_return_regs_valid(regs);

		if (unlikely(stack_store))
			__hard_EE_RI_disable();
		/*
		 * Returning to a kernel context with local irqs disabled.
		 * Here, if EE was enabled in the interrupted context, enable
		 * it on return as well. A problem exists here where a soft
		 * masked interrupt may have cleared MSR[EE] and set HARD_DIS
		 * here, and it will still exist on return to the caller. This
		 * will be resolved by the masked interrupt firing again.
		 */
		if (regs->msr & MSR_EE)
			local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
#endif /* CONFIG_PPC64 */
	}

	if (unlikely(stack_store)) {
		clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
		ret = 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	/*
	 * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
	 * mtmsr, which would cause Read-After-Write stalls. Hence, take the
	 * AMR value from the check above.
	 */
	kuap_kernel_restore(regs, kuap);

	return ret;
}

#ifdef CONFIG_PPC64
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result |= interrupt_exit_user_prepare(regs);

	return regs->exit_result;
}

/*
 * No real need to return a value here because the stack store case does not
 * get restarted.
 */
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	if (regs->softe == IRQS_ENABLED)
		trace_hardirqs_off();

	BUG_ON(user_mode(regs));

	return interrupt_exit_kernel_prepare(regs);
}
#endif
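
/*
 * Editor's note: the sketch below is NOT part of this file or of the kernel
 * build; it is kept under #if 0 so it cannot affect compilation. It is a
 * minimal, hedged user-space illustration of the dispatch pattern used by
 * system_call_exception() above: bounds-check the syscall number, index a
 * table of six-argument handlers, and truncate the argument registers to
 * 32 bits for a compat caller. The demo_* names and the toy table are
 * invented for illustration only; the real kernel indexes sys_call_table /
 * compat_sys_call_table and issues barrier_nospec() before the table access
 * rather than relying on the plain bounds check shown here. If extracted on
 * its own, the sketch compiles and runs as an ordinary C program.
 */
#if 0
#include <stdio.h>

typedef long (*demo_syscall_fn)(long, long, long, long, long, long);

/* Stand-in handlers with the same six-register signature as syscall_fn. */
static long demo_getpid(long a, long b, long c, long d, long e, long f)
{
	return 1234;		/* arbitrary demo return value */
}

static long demo_write(long fd, long buf, long len, long d, long e, long f)
{
	return len;		/* pretend every byte was written */
}

/* Toy table standing in for sys_call_table / compat_sys_call_table. */
static const demo_syscall_fn demo_table[] = { demo_getpid, demo_write };
#define DEMO_NR_SYSCALLS (sizeof(demo_table) / sizeof(demo_table[0]))

static long demo_dispatch(unsigned long nr, int compat,
			  long r3, long r4, long r5, long r6, long r7, long r8)
{
	if (nr >= DEMO_NR_SYSCALLS)
		return -38;	/* -ENOSYS, as system_call_exception() returns */

	/* The kernel issues barrier_nospec() here, before indexing the table. */

	if (compat) {
		/* A compat task only provides 32-bit argument registers. */
		r3 &= 0xffffffffULL;
		r4 &= 0xffffffffULL;
		r5 &= 0xffffffffULL;
		r6 &= 0xffffffffULL;
		r7 &= 0xffffffffULL;
		r8 &= 0xffffffffULL;
	}

	return demo_table[nr](r3, r4, r5, r6, r7, r8);
}

int main(void)
{
	printf("write() -> %ld\n", demo_dispatch(1, 0, 3, 0, 42, 0, 0, 0));
	printf("bad nr  -> %ld\n", demo_dispatch(99, 0, 0, 0, 0, 0, 0, 0));
	return 0;
}
#endif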