/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

	.macro	kernel_ventry, el:req, ht:req, regsize:req, label:req
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\el\ht\()_\regsize\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm

	.macro	tramp_alias, dst, sym
	.set	.Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text
	movz	\dst, :abs_g2_s:.Lalias\@
	movk	\dst, :abs_g1_nc:.Lalias\@
	movk	\dst, :abs_g0_nc:.Lalias\@
	.endm

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
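	/*
	 * apply_ssbd issues the ARM_SMCCC_ARCH_WORKAROUND_2 call to toggle the
	 * Spectre-v4 firmware mitigation: \state is 1 on kernel entry and 0 on
	 * return to userspace. The leading branch skips the sequence unless
	 * spectre_v4_patch_fw_mitigation_enable patches it to a NOP, and the
	 * call is also elided when this CPU does not require the callback or
	 * the current task has TIF_SSBD set. The trailing NOP is patched to
	 * SMC or HVC #0 according to the SMCCC conduit.
	 */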
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2,	.L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm

	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm

	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm

	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	mov	\tmp, KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, \tmp
1:
#endif
	.endm

	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]

	mte_set_gcr	\tmp, \tmp2
1:
#endif
	.endm

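	/*
	 * kernel_entry builds a struct pt_regs frame on the stack for the
	 * interrupted context: x0-x29, lr and the original sp are saved along
	 * with ELR_EL1 and SPSR_EL1. For exceptions from EL0 it additionally
	 * zeroes the GPRs, points sp_el0 at the current task and
	 * re-establishes the kernel's MTE, pointer authentication and SSBD
	 * state.
	 */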
	.macro	kernel_entry, el, regsize = 64
	.if	\el == 0
	alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT
	.endif
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:

	scs_load_current
	.else
	add	x21, sp, #PT_REGS_SIZE
	get_current_task tsk
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * For exceptions from EL0, create a final frame record.
	 * For exceptions from EL1, create a synthetic frame record so the
	 * interrupted code shows up in the backtrace.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
	b	.Lskip_pmr_save\@
alternative_else_nop_endif

	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20

.Lskip_pmr_save\@:
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

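	/*
	 * kernel_exit is the mirror of kernel_entry: it restores ELR_EL1,
	 * SPSR_EL1 and the GPRs from the pt_regs frame and returns with ERET.
	 * Returns to EL0 go via the KPTI exit trampoline when
	 * ARM64_UNMAP_KERNEL_AT_EL0 is in effect.
	 */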
	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
	b	.Lskip_pmr_restore\@
alternative_else_nop_endif

	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20

	/* Ensure priority change is seen by redistributor */
alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
	dsb	sy
alternative_else_nop_endif

.Lskip_pmr_restore\@:
#endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk

	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	msr	far_el1, x29

	ldr_this_cpu	x30, this_cpu_vector, x29
	tramp_alias	x29, tramp_exit
	msr	vbar_el1, x30			// install vector table
	ldr	lr, [sp, #S_LR]			// restore x30
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	br	x29
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endif
	sb
	.endm

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

	.text

/*
 * Exception vectors.
 */
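/*
 * Sixteen 128-byte slots, in the order mandated by the architecture:
 * {sync, irq, fiq, error} for each of EL1t, EL1h, 64-bit EL0 and 32-bit EL0.
 */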
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
	kernel_ventry	0, t, 64, error		// Error 64-bit EL0

	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
SYM_CODE_END(vectors)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(__bad_stack)
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */

	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #PT_REGS_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #PT_REGS_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
SYM_CODE_END(__bad_stack)
#endif /* CONFIG_VMAP_STACK */


	.macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
	kernel_entry \el, \regsize
	mov	x0, sp
	bl	el\el\ht\()_\regsize\()_\label\()_handler
	.if \el == 0
	b	ret_to_user
	.else
	b	ret_to_kernel
	.endif
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
	.endm

/*
 * Early exception handlers
 */
	entry_handler	1, t, 64, sync
	entry_handler	1, t, 64, irq
	entry_handler	1, t, 64, fiq
	entry_handler	1, t, 64, error

	entry_handler	1, h, 64, sync
	entry_handler	1, h, 64, irq
	entry_handler	1, h, 64, fiq
	entry_handler	1, h, 64, error

	entry_handler	0, t, 64, sync
	entry_handler	0, t, 64, irq
	entry_handler	0, t, 64, fiq
	entry_handler	0, t, 64, error

	entry_handler	0, t, 32, sync
	entry_handler	0, t, 32, irq
	entry_handler	0, t, 32, fiq
	entry_handler	0, t, 32, error

SYM_CODE_START_LOCAL(ret_to_kernel)
	kernel_exit 1
SYM_CODE_END(ret_to_kernel)

SYM_CODE_START_LOCAL(ret_to_user)
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase_on_task_stack
#endif
	kernel_exit 0
SYM_CODE_END(ret_to_user)

	.popsection				// .entry.text

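/*
 * With KPTI, the kernel proper runs on swapper_pg_dir while the trampoline
 * vectors run on the minimal tramp_pg_dir. The two pgds sit a fixed distance
 * apart, so switching is a matter of offsetting the TTBR1 base address by
 * TRAMP_SWAPPER_OFFSET and selecting the kernel or user ASID via
 * USER_ASID_FLAG.
 */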
	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

	.macro tramp_data_read_var	dst, var
#ifdef CONFIG_RELOCATABLE
	ldr	\dst, .L__tramp_data_\var
	.ifndef	.L__tramp_data_\var
	.pushsection	".entry.tramp.rodata", "a", %progbits
	.align	3
.L__tramp_data_\var:
	.quad	\var
	.popsection
	.endif
#else
	/*
	 * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a
	 * compile time constant (and hence not secret and not worth hiding).
	 *
	 * As statically allocated kernel code and data always live in the top
	 * 47 bits of the address space we can sign-extend bit 47 and avoid an
	 * instruction to load the upper 16 bits (which must be 0xFFFF).
	 */
	movz	\dst, :abs_g2_s:\var
	movk	\dst, :abs_g1_nc:\var
	movk	\dst, :abs_g0_nc:\var
#endif
	.endm

#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3

	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch, i.e. the
	 * ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN

	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
	isb
	.else
	adr_l	x30, vectors
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch,
	 * i.e. the ret out of tramp_ventry, but it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

	add	x30, x30, #(1b - \vector_start + 4)
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm

	.macro	generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

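/*
 * Final step of a KPTI return to userspace: switch TTBR1 back to
 * tramp_pg_dir, recover x29 (stashed in FAR_EL1 by kernel_exit) and ERET.
 */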
SYM_CODE_START_LOCAL(tramp_exit)
	tramp_unmap_kernel	x29
	mrs	x29, far_el1			// restore x29
	eret
	sb
SYM_CODE_END(tramp_exit)
	.popsection				// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
	.popsection


/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0
	scs_load_current
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
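/*
 * x19/x20 are seeded by copy_thread(): for a kernel thread x19 holds the
 * thread function and x20 its argument; for a user task x19 is zero and we
 * fall straight through to the return-to-user path.
 */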
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	mov	x0, sp
	bl	asm_exit_to_user_mode
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)

/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 */
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
	get_current_task x16
	scs_save x16
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif

	/* Create a frame record to save our LR and SP (implicit in FP) */
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	ldr_this_cpu x16, irq_stack_ptr, x17

	/* Move to the new stack and call the function there */
	add	sp, x16, #IRQ_STACK_SIZE
	blr	x1

	/*
	 * Restore the SP from the FP, and restore the FP and LR from the frame
	 * record.
	 */
	mov	sp, x29
	ldp	x29, x30, [sp], #16
	scs_load_current
	ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1
 * memory argument accessible.
 *
 * This clobbers x4; __sdei_handler() will restore this from firmware's
 * copy.
 */
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Remember whether to unmap the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	tramp_data_read_var	x4, __sdei_asm_handler
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.popsection		// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

	/* Store the registered-event for crash_smp_send_stop() */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	x19, [x5]

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register; find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0; restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel, point to the previous stack/frame. */
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19		// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/*
	 * x0 = (x0 <= SDEI_EV_FAILED) ?
	 *	EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME
	 */
	cmp	x0, #SDEI_EV_FAILED
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

	/* Clear the registered-event seen by crash_smp_send_stop() */
	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
	cbnz	w3, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	xzr, [x5]

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)

SYM_CODE_START(__sdei_handler_abort)
	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	adr	x1, 1f
	ldr_l	x2, sdei_exit_mode
	sdei_handler_exit exit_mode=x2
	// exit the handler and jump to the next instruction.
	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
1:	ret
SYM_CODE_END(__sdei_handler_abort)
NOKPROBE(__sdei_handler_abort)
#endif /* CONFIG_ARM_SDE_INTERFACE */