// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/irq-entry-common.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/livepatch.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline irqentry_state_t __enter_from_kernel_mode(struct pt_regs *regs)
{
	return irqentry_enter(regs);
}

static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
{
	irqentry_state_t state;

	state = __enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);

	return state;
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs,
						  irqentry_state_t state)
{
	irqentry_exit(regs, state);
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs,
					irqentry_state_t state)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs, state);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	mte_disable_tco_entry(current);
}

static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void arm64_exit_to_user_mode(struct pt_regs *regs)
{
	local_irq_disable();
	exit_to_user_mode_prepare(regs);
	local_daif_mask();
	mte_check_tfsr_exit();
	exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	arm64_exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static noinstr irqentry_state_t arm64_enter_el1_dbg(struct pt_regs *regs)
{
	irqentry_state_t state;

	state.lockdep = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();

	return state;
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
				       irqentry_state_t state)
{
	if (state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	irqentry_nmi_enter(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)						\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{										\
	const char *desc = #regsize "-bit " #el " " #vector;			\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));			\
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u64 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | MDSCR_EL1_SS | MDSCR_EL1_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI, exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fpsimd_syscall_enter(void)
{
	/* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */
	if (system_supports_sme())
		sme_smstop_sm();

	/*
	 * The CPU is not in streaming mode. If non-streaming SVE is not
	 * supported, there is no SVE state that needs to be discarded.
	 */
	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}

	/*
	 * Any live non-FPSIMD SVE state has been zeroed. Allow
	 * fpsimd_save_user_state() to lazily discard SVE state until either
	 * the live state is unbound or fpsimd_syscall_exit() is called.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD);
}

static __always_inline void fpsimd_syscall_exit(void)
{
	if (!system_supports_sve())
		return;

	/*
	 * The current task's user FPSIMD/SVE/SME state is now bound to this
	 * CPU. The fpsimd_last_state.to_save value is either:
	 *
	 * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU
	 *   since fpsimd_syscall_enter().
	 *
	 * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at
	 *   any point.
	 *
	 * Reset this to FP_STATE_CURRENT to stop lazy discarding.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
}

/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
	preempt_disable();

	/* This code is a bit fragile.  Test it. */
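	/*
	 * Note: by this point the debug entry path (e.g. arm64_enter_el1_dbg())
	 * should already have informed RCU that we are in the kernel; warn if
	 * that did not happen.
	 */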
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(debug_exception_exit);

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_gcs(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_mops(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
		debug_exception_enter(regs);
		/*
		 * After handling a breakpoint, we suspend the breakpoint
		 * and use single-step to move to the next instruction.
		 * If we are stepping a suspended breakpoint there's nothing
		 * more to do: the single-step is complete.
		 */
		if (!try_step_suspended_breakpoints(regs))
			do_el1_softstep(esr, regs);
		debug_exception_exit(regs);
	}
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_el1_brk64(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el1_gcs(regs, esr);
		break;
	case ESR_ELx_EC_MOPS:
		el1_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
		el1_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_CUR:
		el1_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_CUR:
		el1_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el1_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	irqentry_state_t state;

	state = irqentry_nmi_enter(regs);
	do_interrupt_handler(regs, handler);
	irqentry_nmi_exit(regs, state);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && regs_irqs_disabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

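/*
 * Note: like the EL0 SError path further below, this handles the SError
 * within an NMI-like section (irqentry_nmi_enter()/irqentry_nmi_exit()) with
 * DAIF_ERRCTX restored, since an asynchronous SError may be taken from
 * almost any context.
 */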
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	irqentry_state_t state;

	local_daif_restore(DAIF_ERRCTX);
	state = irqentry_nmi_enter(regs);
	do_serror(regs, esr);
	irqentry_nmi_exit(regs, state);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_gcs(struct pt_regs *regs,
			    unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_gcs(regs, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
{
	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
{
	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	/*
	 * After handling a breakpoint, we suspend the breakpoint
	 * and use single-step to move to the next instruction.
	 * If we are stepping a suspended breakpoint there's nothing
	 * more to do: the single-step is complete.
	 */
	if (!try_step_suspended_breakpoints(regs)) {
		local_daif_restore(DAIF_PROCCTX);
		do_el0_softstep(esr, regs);
	}
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_brk64(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	fpsimd_syscall_enter();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	arm64_exit_to_user_mode(regs);
	fpsimd_syscall_exit();
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	arm64_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el0_gcs(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el0_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

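/*
 * Note: a PC with bit 55 set lies in the TTBR1 (kernel) half of the address
 * space, which suggests userspace branched to a kernel address; apply
 * branch-predictor hardening before handling the interrupt, mirroring the
 * kernel-address checks in el0_ia() and el0_pc().
 */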
static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	arm64_enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	irqentry_state_t state;

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	state = irqentry_nmi_enter(regs);
	do_serror(regs, esr);
	irqentry_nmi_exit(regs, state);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_bkpt32(esr, regs);
	arm64_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BKPT32:
		el0_bkpt32(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	irqentry_nmi_enter(regs);
	panic_bad_stack(regs, esr, far);
}

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	irqentry_state_t state;
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	state = irqentry_nmi_enter(regs);
	ret = do_sdei_event(regs, arg);
	irqentry_nmi_exit(regs, state);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */