// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <linux/stacktrace.h>

#include <asm/alternative.h>
#include <asm/arch_timer.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/gcs.h>
#include <asm/mmu_context.h>
#include <asm/mpam.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/system_misc.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}
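
/*
 * Unlike machine_halt() above, the power-off and restart paths below also
 * give the registered power-off/restart handlers (and EFI, where available)
 * a chance to act once the secondary CPUs have been stopped.
 */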

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	do_kernel_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(   J, "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
		       pstate,
		       pstate & PSR_AA32_N_BIT ? 'N' : 'n',
		       pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_AA32_C_BIT ? 'C' : 'c',
		       pstate & PSR_AA32_V_BIT ? 'V' : 'v',
		       pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
		       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
		       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
		       pstate & PSR_AA32_A_BIT ? 'A' : 'a',
		       pstate & PSR_AA32_I_BIT ? 'I' : 'i',
		       pstate & PSR_AA32_F_BIT ? 'F' : 'f',
		       pstate & PSR_AA32_DIT_BIT ? '+' : '-',
		       pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
		       pstate,
		       pstate & PSR_N_BIT ? 'N' : 'n',
		       pstate & PSR_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_C_BIT ? 'C' : 'c',
		       pstate & PSR_V_BIT ? 'V' : 'v',
		       pstate & PSR_D_BIT ? 'D' : 'd',
		       pstate & PSR_A_BIT ? 'A' : 'a',
		       pstate & PSR_I_BIT ? 'I' : 'i',
		       pstate & PSR_F_BIT ? 'F' : 'f',
		       pstate & PSR_PAN_BIT ? '+' : '-',
		       pstate & PSR_UAO_BIT ? '+' : '-',
		       pstate & PSR_TCO_BIT ? '+' : '-',
		       pstate & PSR_DIT_BIT ? '+' : '-',
		       pstate & PSR_SSBS_BIT ? '+' : '-',
		       btype_str);
	}
}
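
/*
 * Dump the saved register state: PC, LR, SP, PSTATE and the general purpose
 * registers (x0-x29 for a native task, r0-r12 for a compat task), printed
 * three to a line.
 */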
void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr: %08x\n", regs->pmr);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx", i, regs->regs[i]);

		while (i-- % 3)
			pr_cont(" x%-2d: %016llx", i, regs->regs[i]);

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(0, SYS_TPIDR2_EL0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

static void flush_poe(void)
{
	if (!system_supports_poe())
		return;

	write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

#ifdef CONFIG_ARM64_GCS

static void flush_gcs(void)
{
	if (!system_supports_gcs())
		return;

	current->thread.gcspr_el0 = 0;
	current->thread.gcs_base = 0;
	current->thread.gcs_size = 0;
	current->thread.gcs_el0_mode = 0;
	current->thread.gcs_el0_locked = 0;
	write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1);
	write_sysreg_s(0, SYS_GCSPR_EL0);
}

static int copy_thread_gcs(struct task_struct *p,
			   const struct kernel_clone_args *args)
{
	unsigned long gcs;

	if (!system_supports_gcs())
		return 0;

	p->thread.gcs_base = 0;
	p->thread.gcs_size = 0;

	p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
	p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;

	gcs = gcs_alloc_thread_stack(p, args);
	if (IS_ERR_VALUE(gcs))
		return PTR_ERR((void *)gcs);

	return 0;
}

#else

static void flush_gcs(void) { }
static int copy_thread_gcs(struct task_struct *p,
			   const struct kernel_clone_args *args)
{
	return 0;
}

#endif

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
	flush_poe();
	flush_gcs();
}
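
/*
 * The helpers below duplicate and release the per-task TLB flush batching
 * cpumask when the workaround applies; without the erratum configured they
 * are empty stubs.
 */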
#ifdef CONFIG_ARM64_ERRATUM_4193714

static void arch_dup_tlbbatch_mask(struct task_struct *dst)
{
	/*
	 * Clear the inherited cpumask with memset() to cover both cases where
	 * cpumask_var_t is a pointer or an array. It will be allocated lazily
	 * in sme_dvmsync_add_pending() if CPUMASK_OFFSTACK=y.
	 */
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
		memset(&dst->tlb_ubc.arch.cpumask, 0,
		       sizeof(dst->tlb_ubc.arch.cpumask));
}

static void arch_release_tlbbatch_mask(struct task_struct *tsk)
{
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
		free_cpumask_var(tsk->tlb_ubc.arch.cpumask);
}

#else

static void arch_dup_tlbbatch_mask(struct task_struct *dst)
{
}

static void arch_release_tlbbatch_mask(struct task_struct *tsk)
{
}

#endif /* CONFIG_ARM64_ERRATUM_4193714 */

void arch_release_task_struct(struct task_struct *tsk)
{
	arch_release_tlbbatch_mask(tsk);
	fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * The current/src task's FPSIMD state may or may not be live, and may
	 * have been altered by ptrace after entry to the kernel. Save the
	 * effective FPSIMD state so that this will be copied into dst.
	 */
	fpsimd_save_and_flush_current_state();
	fpsimd_sync_from_effective_state(src);

	*dst = *src;

	arch_dup_tlbbatch_mask(dst);

	/*
	 * Drop stale reference to src's sve_state and convert dst to
	 * non-streaming FPSIMD mode.
	 */
	dst->thread.fp_type = FP_STATE_FPSIMD;
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);
	task_smstop_sm(dst);

	/*
	 * Drop stale reference to src's sme_state and ensure dst has ZA
	 * disabled.
	 *
	 * When necessary, ZA will be inherited later in copy_thread_za().
	 */
	dst->thread.sme_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SME);
	dst->thread.svcr &= ~SVCR_ZA_MASK;

	/* clear any pending asynchronous tag fault raised by the parent */
	clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

	return 0;
}

static int copy_thread_za(struct task_struct *dst, struct task_struct *src)
{
	if (!thread_za_enabled(&src->thread))
		return 0;

	dst->thread.sve_state = kzalloc(sve_state_size(src),
					GFP_KERNEL);
	if (!dst->thread.sve_state)
		return -ENOMEM;

	dst->thread.sme_state = kmemdup(src->thread.sme_state,
					sme_state_size(src),
					GFP_KERNEL);
	if (!dst->thread.sme_state) {
		kfree(dst->thread.sve_state);
		dst->thread.sve_state = NULL;
		return -ENOMEM;
	}

	set_tsk_thread_flag(dst, TIF_SME);
	dst->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");
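
/*
 * Set up the register state and cpu_context of a newly cloned task so that
 * it starts life in ret_from_fork(): user tasks get a copy of the parent's
 * pt_regs (with x0 = 0 so the child sees a zero return value), kernel
 * threads get an illegal pt_regs and run args->fn(args->fn_arg) instead.
 */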
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	unsigned long stack_start = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);
	int ret;

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!args->fn)) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (system_supports_poe())
			p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * Due to the AAPCS64 "ZA lazy saving scheme", PSTATE.ZA and
		 * TPIDR2 need to be manipulated as a pair, and either both
		 * need to be inherited or both need to be reset.
		 *
		 * Within a process, child threads must not inherit their
		 * parent's TPIDR2 value or they may clobber their parent's
		 * stack at some later point.
		 *
		 * When a process is fork()'d, the child must inherit ZA and
		 * TPIDR2 from its parent in case there was dormant ZA state.
		 *
		 * Use CLONE_VM to determine when the child will share the
		 * address space with the parent, and cannot safely inherit the
		 * state.
		 */
		if (system_supports_sme()) {
			if (!(clone_flags & CLONE_VM)) {
				p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
				ret = copy_thread_za(p, current);
				if (ret)
					return ret;
			} else {
				p->thread.tpidr2_el0 = 0;
				WARN_ON_ONCE(p->thread.svcr & SVCR_ZA_MASK);
			}
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.uw.tp_value = tls;

		ret = copy_thread_gcs(p, args);
		if (ret != 0)
			return ret;
	} else {
		/*
		 * A kthread has no context to ERET to, so ensure any buggy
		 * ERET is treated as an illegal exception return.
		 *
		 * When a user task is created from a kthread, childregs will
		 * be initialized by start_thread() or start_compat_thread().
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
		childregs->stackframe.type = FRAME_META_TYPE_FINAL;

		p->thread.cpu_context.x19 = (unsigned long)args->fn;
		p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;

		if (system_supports_poe())
			p->thread.por_el0 = POR_EL0_INIT;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;
	/*
	 * For the benefit of the unwinder, set up childregs->stackframe
	 * as the final frame for the new task.
	 */
	p->thread.cpu_context.fp = (unsigned long)&childregs->stackframe;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
	if (system_supports_tpidr2() && !is_compat_task())
		current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}
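
/*
 * Install the next task's TLS registers: tpidrro_el0 carries the TLS value
 * for compat tasks (and is zeroed for native tasks), while tpidr_el0 and,
 * where implemented, TPIDR2_EL0 are loaded from the next task's saved state.
 */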
static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (alternative_has_cap_unlikely(ARM64_SSBS))
		return;

	spectre_v4_enable_task_mitigation(next);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

#ifdef CONFIG_ARM64_GCS

void gcs_preserve_current_state(void)
{
	current->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
}

static void gcs_thread_switch(struct task_struct *next)
{
	if (!system_supports_gcs())
		return;

	/* GCSPR_EL0 is always readable */
	gcs_preserve_current_state();
	write_sysreg_s(next->thread.gcspr_el0, SYS_GCSPR_EL0);

	if (current->thread.gcs_el0_mode != next->thread.gcs_el0_mode)
		gcs_set_el0_mode(next);

	/*
	 * Ensure that GCS memory effects of the 'prev' thread are
	 * ordered before other memory accesses with release semantics
	 * (or preceded by a DMB) on the current PE. In addition, any
	 * memory accesses with acquire semantics (or succeeded by a
	 * DMB) are ordered before GCS memory effects of the 'next'
	 * thread. This will ensure that the GCS memory effects are
	 * visible to other PEs in case of migration.
	 */
	if (task_gcs_el0_enabled(current) || task_gcs_el0_enabled(next))
		gcsb_dsync();
}

#else

static void gcs_thread_switch(struct task_struct *next)
{
}

#endif

/*
 * Handle sysreg updates for ARM erratum 1418040 which affects the 32bit view of
 * CNTVCT, various other errata which require trapping all CNTVCT{,_EL0}
 * accesses and prctl(PR_SET_TSC). Ensure access is disabled iff a workaround is
 * required or PR_TSC_SIGSEGV is set.
 */
static void update_cntkctl_el1(struct task_struct *next)
{
	struct thread_info *ti = task_thread_info(next);

	if (test_ti_thread_flag(ti, TIF_TSC_SIGSEGV) ||
	    has_erratum_handler(read_cntvct_el0) ||
	    (IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
	     this_cpu_has_cap(ARM64_WORKAROUND_1418040) &&
	     is_compat_thread(ti)))
		sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
	else
		sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}

static void cntkctl_thread_switch(struct task_struct *prev,
				  struct task_struct *next)
{
	if ((read_ti_thread_flags(task_thread_info(prev)) &
	     (_TIF_32BIT | _TIF_TSC_SIGSEGV)) !=
	    (read_ti_thread_flags(task_thread_info(next)) &
	     (_TIF_32BIT | _TIF_TSC_SIGSEGV)))
		update_cntkctl_el1(next);
}

static int do_set_tsc_mode(unsigned int val)
{
	bool tsc_sigsegv;

	if (val == PR_TSC_SIGSEGV)
		tsc_sigsegv = true;
	else if (val == PR_TSC_ENABLE)
		tsc_sigsegv = false;
	else
		return -EINVAL;

	preempt_disable();
	update_thread_flag(TIF_TSC_SIGSEGV, tsc_sigsegv);
	update_cntkctl_el1(current);
	preempt_enable();

	return 0;
}

static void permission_overlay_switch(struct task_struct *next)
{
	if (!system_supports_poe())
		return;

	current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
	if (current->thread.por_el0 != next->thread.por_el0) {
		write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
		/*
		 * No ISB required as we can tolerate spurious Overlay faults -
		 * the fault handler will check again based on the new value
		 * of POR_EL0.
		 */
	}
}

/*
 * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
 * this function must be called with preemption disabled and the update to
 * sctlr_user must be made in the same preemption disabled block so that
 * __switch_to() does not see the variable update before the SCTLR_EL1 one.
 */
void update_sctlr_el1(u64 sctlr)
{
	/*
	 * EnIA must not be cleared while in the kernel as this is necessary for
	 * in-kernel PAC. It will be cleared on kernel exit if needed.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);

	/* ISB required for the kernel uaccess routines when setting TCF0. */
	isb();
}

static inline void debug_switch_state(void)
{
	if (system_uses_irq_prio_masking()) {
		unsigned long daif_expected = 0;
		unsigned long daif_actual = read_sysreg(daif);
		unsigned long pmr_expected = GIC_PRIO_IRQOFF;
		unsigned long pmr_actual = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ONCE(daif_actual != daif_expected ||
			  pmr_actual != pmr_expected,
			  "Unexpected DAIF + PMR: 0x%lx + 0x%lx (expected 0x%lx + 0x%lx)\n",
			  daif_actual, pmr_actual,
			  daif_expected, pmr_expected);
	} else {
		unsigned long daif_expected = DAIF_PROCCTX_NOIRQ;
		unsigned long daif_actual = read_sysreg(daif);

		WARN_ONCE(daif_actual != daif_expected,
			  "Unexpected DAIF value: 0x%lx (expected 0x%lx)\n",
			  daif_actual, daif_expected);
	}
}

/*
 * Thread switching.
 */
__notrace_funcgraph __sched
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	debug_switch_state();

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	ssbs_thread_switch(next);
	cntkctl_thread_switch(prev, next);
	ptrauth_thread_switch_user(next);
	permission_overlay_switch(next);
	gcs_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case the
	 * thread migrates to a different CPU. This full barrier is also
	 * required by the membarrier system call. Additionally it makes any
	 * in-progress pgtable writes visible to the table walker; see
	 * emit_pte_barriers().
	 */
	dsb(ish);

	/*
	 * MTE thread switching must happen after the DSB above to ensure that
	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
	 * registers.
	 */
	mte_thread_switch(next);
	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (prev->thread.sctlr_user != next->thread.sctlr_user)
		update_sctlr_el1(next->thread.sctlr_user);

	/*
	 * MPAM thread switch happens after the DSB to ensure prev's accesses
	 * use prev's MPAM settings.
	 */
	mpam_thread_switch(next);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

struct wchan_info {
	unsigned long pc;
	int count;
};

static bool get_wchan_cb(void *arg, unsigned long pc)
{
	struct wchan_info *wchan_info = arg;

	if (!in_sched_functions(pc)) {
		wchan_info->pc = pc;
		return false;
	}
	return wchan_info->count++ < 16;
}

unsigned long __get_wchan(struct task_struct *p)
{
	struct wchan_info wchan_info = {
		.pc = 0,
		.count = 0,
	};

	if (!try_get_task_stack(p))
		return 0;

	arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);

	put_task_stack(p);

	return wchan_info.pc;
}
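
/*
 * Randomise the initial stack pointer at exec time (unless the task has
 * disabled address space randomisation) and keep it 16-byte aligned.
 */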
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}

#ifdef CONFIG_COMPAT
int compat_elf_check_arch(const struct elf32_hdr *hdr)
{
	if (!system_supports_32bit_el0())
		return false;

	if ((hdr)->e_machine != EM_ARM)
		return false;

	if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
		return false;

	/*
	 * Prevent execve() of a 32-bit program from a deadline task
	 * if the restricted affinity mask would be inadmissible on an
	 * asymmetric system.
	 */
	return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       !dl_task_check_affinity(current, system_32bit_el0_cpumask());
}
#endif

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	unsigned long mmflags = 0;

	if (is_compat_task()) {
		mmflags = MMCF_AARCH32;

		/*
		 * Restrict the CPU affinity mask for a 32-bit task so that
		 * it contains only 32-bit-capable CPUs.
		 *
		 * From the perspective of the task, this looks similar to
		 * what would happen if the 64-bit-only CPUs were hot-unplugged
		 * at the point of execve(), although we try a bit harder to
		 * honour the cpuset hierarchy.
		 */
		if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
			force_compatible_cpus_allowed_ptr(current);
	} else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
		relax_compatible_cpus_allowed_ptr(current);
	}

	current->mm->context.flags = mmflags;
	ptrauth_thread_init_user();
	mte_thread_init_user();
	do_set_tsc_mode(PR_TSC_ENABLE);

	if (task_spec_ssb_noexec(current)) {
		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
					 PR_SPEC_ENABLE);
	}
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (system_supports_mte()) {
		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
			      PR_MTE_TAG_MASK;

		if (cpus_have_cap(ARM64_MTE_STORE_ONLY))
			valid_mask |= PR_MTE_STORE_ONLY;
	}

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	if (set_mte_ctrl(task, arg) != 0)
		return -EINVAL;

	update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

long get_tagged_addr_ctrl(struct task_struct *task)
{
	long ret = 0;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
		ret = PR_TAGGED_ADDR_ENABLE;

	ret |= get_mte_ctrl(task);

	return ret;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

static const struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */

#ifdef CONFIG_BINFMT_ELF
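/*
 * Upgrade PROT_EXEC mappings to also carry PROT_BTI when the ELF image was
 * built with BTI support, so that indirect branches into them must land on
 * a BTI instruction.
 */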
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (is_compat_task())
		return -EINVAL;

	if (test_thread_flag(TIF_TSC_SIGSEGV))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (is_compat_task())
		return -EINVAL;

	return do_set_tsc_mode(val);
}