// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/irq-entry-common.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/livepatch.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline irqentry_state_t __enter_from_kernel_mode(struct pt_regs *regs)
{
	return irqentry_enter(regs);
}

static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
{
	irqentry_state_t state;

	state = __enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);

	return state;
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs,
						  irqentry_state_t state)
{
	irqentry_exit(regs, state);
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs,
					irqentry_state_t state)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs, state);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	mte_disable_tco_entry(current);
}

static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void arm64_exit_to_user_mode(struct pt_regs *regs)
{
	local_irq_disable();
	exit_to_user_mode_prepare(regs);
	local_daif_mask();
	mte_check_tfsr_exit();
	exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	arm64_exit_to_user_mode(regs);
}

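/*
 * Note: the enter/exit helpers above are composed by the EL1 and EL0 exception
 * handlers further down in this file. As a rough sketch (compare e.g.
 * el1_undef() and el0_undef() below), a typical EL1 handler looks like:
 *
 *	state = enter_from_kernel_mode(regs);
 *	local_daif_inherit(regs);
 *	<do_xxx(), with instrumentation allowed>
 *	local_daif_mask();
 *	exit_to_kernel_mode(regs, state);
 *
 * and a typical EL0 handler looks like:
 *
 *	arm64_enter_from_user_mode(regs);
 *	local_daif_restore(DAIF_PROCCTX);
 *	<do_xxx(), with instrumentation allowed>
 *	arm64_exit_to_user_mode(regs);
 */
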
/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static noinstr irqentry_state_t arm64_enter_el1_dbg(struct pt_regs *regs)
{
	irqentry_state_t state;

	state.lockdep = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();

	return state;
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
				       irqentry_state_t state)
{
	if (state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

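/*
 * do_interrupt_handler() above runs the root IRQ/FIQ handler on the per-CPU
 * IRQ stack when the exception was taken while still on the task's thread
 * stack; if we are already off the thread stack (e.g. already on the IRQ
 * stack), the handler is invoked on the current stack instead.
 */
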
extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	irqentry_nmi_enter(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u64 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | MDSCR_EL1_SS | MDSCR_EL1_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fpsimd_syscall_enter(void)
{
	/* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */
	if (system_supports_sme())
		sme_smstop_sm();

	/*
	 * The CPU is not in streaming mode. If non-streaming SVE is not
	 * supported, there is no SVE state that needs to be discarded.
	 */
	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}

	/*
	 * Any live non-FPSIMD SVE state has been zeroed. Allow
	 * fpsimd_save_user_state() to lazily discard SVE state until either
	 * the live state is unbound or fpsimd_syscall_exit() is called.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD);
}

static __always_inline void fpsimd_syscall_exit(void)
{
	if (!system_supports_sve())
		return;

	/*
	 * The current task's user FPSIMD/SVE/SME state is now bound to this
	 * CPU. The fpsimd_last_state.to_save value is either:
	 *
	 * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU
	 *   since fpsimd_syscall_enter().
	 *
	 * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at
	 *   any point.
	 *
	 * Reset this to FP_STATE_CURRENT to stop lazy discarding.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
}

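/*
 * fpsimd_syscall_enter() and fpsimd_syscall_exit() are only paired around the
 * native SVC path (see el0_svc() below); the compat SVC path in
 * el0_svc_compat() does not use them.
 */
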
/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
	preempt_disable();

	/* This code is a bit fragile. Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(debug_exception_exit);

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_gcs(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_mops(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
		debug_exception_enter(regs);
		/*
		 * After handling a breakpoint, we suspend the breakpoint
		 * and use single-step to move to the next instruction.
		 * If we are stepping a suspended breakpoint there's nothing
		 * more to do: the single-step is complete.
		 */
		if (!try_step_suspended_breakpoints(regs))
			do_el1_softstep(esr, regs);
		debug_exception_exit(regs);
	}
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_el1_brk64(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs, state);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el1_gcs(regs, esr);
		break;
	case ESR_ELx_EC_MOPS:
		el1_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
		el1_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_CUR:
		el1_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_CUR:
		el1_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el1_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	irqentry_state_t state;

	state = irqentry_nmi_enter(regs);
	do_interrupt_handler(regs, handler);
	irqentry_nmi_exit(regs, state);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	irqentry_state_t state;

	state = enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_kernel_mode(regs, state);
}

static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && regs_irqs_disabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	irqentry_state_t state;

	local_daif_restore(DAIF_ERRCTX);
	state = irqentry_nmi_enter(regs);
	do_serror(regs, esr);
	irqentry_nmi_exit(regs, state);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_gcs(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_gcs(regs, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
{
	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
{
	bool step_done;

	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	/*
	 * After handling a breakpoint, we suspend the breakpoint
	 * and use single-step to move to the next instruction.
	 * If we are stepping a suspended breakpoint there's nothing more to
	 * do: the single-step is complete.
	 */
	step_done = try_step_suspended_breakpoints(regs);
	local_daif_restore(DAIF_PROCCTX);
	if (!step_done)
		do_el0_softstep(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_brk64(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	fpsimd_syscall_enter();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	arm64_exit_to_user_mode(regs);
	fpsimd_syscall_exit();
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	arm64_exit_to_user_mode(regs);
}

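/*
 * Unlike the EL1 sync dispatcher, which panics on an unexpected exception
 * class, the EL0 dispatchers below route unknown exception classes through
 * el0_inv()/bad_el0_sync(), delivering a fatal signal to the offending task
 * rather than bringing down the kernel.
 */
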
asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el0_gcs(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el0_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

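/*
 * Note: in el0_interrupt() below, bit 55 of the interrupted PC distinguishes
 * TTBR1 (kernel) addresses from TTBR0 (user) addresses. User code should not
 * have been executing from a kernel address, so branch-predictor hardening is
 * applied defensively before the interrupt is handled, mirroring the checks
 * in the EL0 fault handlers above.
 */
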
static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	arm64_enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	irqentry_state_t state;

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	state = irqentry_nmi_enter(regs);
	do_serror(regs, esr);
	irqentry_nmi_exit(regs, state);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_bkpt32(esr, regs);
	arm64_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BKPT32:
		el0_bkpt32(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	irqentry_nmi_enter(regs);
	panic_bad_stack(regs, esr, far);
}

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	irqentry_state_t state;
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	state = irqentry_nmi_enter(regs);
	ret = do_sdei_event(regs, arg);
	irqentry_nmi_exit(regs, state);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */