// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CT_STATE_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

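/*
 * Process pending work before returning to user mode. Each item of work is
 * handled with IRQs enabled; the thread flags are then re-read with IRQs
 * disabled and the loop repeats until no work remains, so we never return to
 * userspace with work still pending.
 */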
static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		local_irq_enable();

		if (thread_flags & _TIF_NEED_RESCHED)
			schedule();

		if (thread_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
			clear_thread_flag(TIF_MTE_ASYNC_FAULT);
			send_sig_fault(SIGSEGV, SEGV_MTEAERR,
				       (void __user *)NULL, current);
		}

		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			do_signal(regs);

		if (thread_flags & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		if (thread_flags & _TIF_FOREIGN_FPSTATE)
			fpsimd_restore_current_state();

		local_irq_disable();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}

static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long flags;

	local_irq_disable();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);

	local_daif_mask();

	lockdep_sys_exit();
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode_prepare(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

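/*
 * With CONFIG_PREEMPT_DYNAMIC the preemption model is chosen at boot time, so
 * whether we reschedule on IRQ exit is gated behind a static key; otherwise
 * it is fixed at build time by CONFIG_PREEMPTION.
 */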
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif

static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

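/*
 * Run an interrupt handler, switching to the per-CPU IRQ stack if we were
 * still on a task stack, so that deep interrupt handling cannot overflow the
 * task's kernel stack.
 */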
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)						\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{										\
	const char *desc = #regsize "-bit " #el " " #vector;			\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));			\
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI, exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fp_user_discard(void)
{
	/*
	 * If SME is active then exit streaming mode. If ZA is active
	 * then flush the SVE registers but leave userspace access to
	 * both SVE and SME enabled, otherwise disable SME for the
	 * task and fall through to disabling SVE too. This means
	 * that after a syscall we never have any streaming mode
	 * register state to track; if this changes, the KVM code will
	 * need updating.
	 */
	if (system_supports_sme())
		sme_smstop_sm();

	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}
}

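/*
 * EL1t vectors are used for exceptions taken from EL1 while using SP_EL0.
 * The kernel runs EL1 with SP_EL1 selected (EL1h), so taking one of these
 * exceptions indicates a serious problem and is treated as fatal.
 */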
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_gcs(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_mops(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el1_gcs(regs, esr);
		break;
	case ESR_ELx_EC_MOPS:
		el1_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

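/*
 * If we took an IRQ while the interrupted context had PSTATE.I set, it can
 * only have been delivered as a pseudo-NMI via GIC priority masking, so
 * account for it as an NMI; otherwise treat it as an ordinary kernel-mode
 * interrupt.
 */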
static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}

static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

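/*
 * Handlers for synchronous exceptions taken from EL0. Each one typically
 * performs the user-mode entry accounting, unmasks DAIF for normal process
 * context, handles the exception, and then runs the exit-to-user-mode work.
 */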
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_gcs(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_gcs(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	fp_user_discard();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el0_gcs(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

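/*
 * Bit 55 of the PC distinguishes TTBR0 (user) from TTBR1 (kernel) addresses.
 * An interrupt taken from EL0 whose return address looks like a kernel
 * address is suspicious, so apply branch predictor hardening before handling
 * it, mirroring the checks in el0_ia() and el0_pc().
 */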
static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

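/*
 * Exceptions from 32-bit (AArch32 compat) EL0 reuse the 64-bit helpers, with
 * compat-specific paths for syscalls and CP15 accesses. Without CONFIG_COMPAT
 * the 32-bit vectors should never be reached and are treated as fatal.
 */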
#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */