// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

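/*
 * Check for and handle any pending work (e.g. signal delivery or
 * rescheduling) with all DAIF exceptions masked before returning to
 * userspace.
 */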
static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	prepare_exit_to_user_mode(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void __sched arm64_preempt_schedule_irq(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

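/*
 * Run an interrupt handler with the interrupted context's pt_regs installed
 * via set_irq_regs(), switching to the per-CPU IRQ stack when the exception
 * was taken on the task's thread stack.
 */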
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned int esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)						\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{										\
	const char *desc = #regsize "-bit " #el " " #vector;			\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));			\
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

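/*
 * The kernel always runs with SP_ELx selected, so exceptions taken from EL1
 * while using SP_EL0 (EL1t) are never expected and are treated as fatal.
 */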
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION) &&
	    READ_ONCE(current_thread_info()->preempt_count) == 0)
		arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}

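/*
 * When pseudo-NMI support is enabled, an interrupt taken while the
 * interrupted context had IRQs masked must be a pseudo-NMI, so handle it
 * with NMI entry/exit accounting; otherwise handle it as an ordinary IRQ,
 * which may also trigger preemption on return.
 */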
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

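/*
 * For interrupts taken from EL0, apply BP hardening before running the
 * handler when the interrupted PC is a TTBR1 (kernel) address, i.e. when
 * bit 55 of the address is set.
 */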
static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned int esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */