// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <linux/stacktrace.h>

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/system_misc.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve
 * this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	do_kernel_power_off();
}
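
/*
 * Illustrative sketch (not part of this file): the power-off method that
 * do_kernel_power_off() invokes above is supplied by platform code, either
 * through a registered sys-off handler or, historically, by assigning the
 * legacy pm_power_off hook exported near the top of this file:
 *
 *	static void my_board_power_off(void)	// hypothetical example
 *	{
 *		// poke a (hypothetical) PMIC register to cut power
 *	}
 *
 *	pm_power_off = my_board_power_off;	// e.g. from a driver probe()
 */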

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
		       pstate,
		       pstate & PSR_AA32_N_BIT ? 'N' : 'n',
		       pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_AA32_C_BIT ? 'C' : 'c',
		       pstate & PSR_AA32_V_BIT ? 'V' : 'v',
		       pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
		       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
		       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
		       pstate & PSR_AA32_A_BIT ? 'A' : 'a',
		       pstate & PSR_AA32_I_BIT ? 'I' : 'i',
		       pstate & PSR_AA32_F_BIT ? 'F' : 'f',
		       pstate & PSR_AA32_DIT_BIT ? '+' : '-',
		       pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
		       pstate,
		       pstate & PSR_N_BIT ? 'N' : 'n',
		       pstate & PSR_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_C_BIT ? 'C' : 'c',
		       pstate & PSR_V_BIT ? 'V' : 'v',
		       pstate & PSR_D_BIT ? 'D' : 'd',
		       pstate & PSR_A_BIT ? 'A' : 'a',
		       pstate & PSR_I_BIT ? 'I' : 'i',
		       pstate & PSR_F_BIT ? 'F' : 'f',
		       pstate & PSR_PAN_BIT ? '+' : '-',
		       pstate & PSR_UAO_BIT ? '+' : '-',
		       pstate & PSR_TCO_BIT ? '+' : '-',
		       pstate & PSR_DIT_BIT ? '+' : '-',
		       pstate & PSR_SSBS_BIT ? '+' : '-',
		       btype_str);
	}
}
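
/*
 * Worked example (illustrative): a native pstate value of 0x604000c5 would
 * be rendered by print_pstate() above as
 *
 *	pstate: 604000c5 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
 *
 * i.e. Z and C set, N and V clear, IRQs and FIQs masked (upper-case I/F),
 * Debug and SError unmasked, PAN set, and the remaining PSTATE extension
 * bits clear.
 */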

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx", i, regs->regs[i]);

		while (i-- % 3)
			pr_cont(" x%-2d: %016llx", i, regs->regs[i]);

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(0, SYS_TPIDR2_EL0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}
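
/*
 * For context (illustrative): the value cleared from TPIDR_EL0 by
 * tls_thread_flush() is the thread pointer that native userspace reads
 * directly, with no syscall involved:
 *
 *	void *tp;
 *	asm ("mrs %0, tpidr_el0" : "=r" (tp));
 *
 * Compat (AArch32) tasks instead read the read-only TPIDRRO_EL0, which the
 * architecture aliases to the AArch32 TPIDRURO register - hence both
 * registers are reset above.
 */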

static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	/* We rely on the above assignment to initialize dst's thread_flags: */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

	/*
	 * Detach src's sve_state (if any) from dst so that it does not
	 * get erroneously used or freed prematurely. dst's copies
	 * will be allocated on demand later on if dst uses SVE.
	 * For consistency, also clear TIF_SVE here: this could be done
	 * later in copy_process(), but to avoid tripping up future
	 * maintainers it is best not to leave TIF flags and buffers in
	 * an inconsistent state, even temporarily.
	 */
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);

	/*
	 * In the unlikely event that we create a new thread with ZA
	 * enabled we should retain the ZA and ZT state, so duplicate
	 * it here. It may shortly be freed if we exec() or if
	 * CLONE_SETTLS is set, but it's simpler to do it here. To avoid
	 * confusing the rest of the code, ensure that we have a
	 * sve_state allocated whenever sme_state is allocated.
	 */
	if (thread_za_enabled(&src->thread)) {
		dst->thread.sve_state = kzalloc(sve_state_size(src),
						GFP_KERNEL);
		if (!dst->thread.sve_state)
			return -ENOMEM;

		dst->thread.sme_state = kmemdup(src->thread.sme_state,
						sme_state_size(src),
						GFP_KERNEL);
		if (!dst->thread.sme_state) {
			kfree(dst->thread.sve_state);
			dst->thread.sve_state = NULL;
			return -ENOMEM;
		}
	} else {
		dst->thread.sme_state = NULL;
		clear_tsk_thread_flag(dst, TIF_SME);
	}

	dst->thread.fp_type = FP_STATE_FPSIMD;

	/* clear any pending asynchronous tag fault raised by the parent */
	clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long stack_start = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!args->fn)) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);
		if (system_supports_tpidr2())
			p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread. We also reset TPIDR2 if it's in use.
		 */
		if (clone_flags & CLONE_SETTLS) {
			p->thread.uw.tp_value = tls;
			p->thread.tpidr2_el0 = 0;
		}
	} else {
		/*
		 * A kthread has no context to ERET to, so ensure any buggy
		 * ERET is treated as an illegal exception return.
		 *
		 * When a user task is created from a kthread, childregs will
		 * be initialized by start_thread() or start_compat_thread().
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;

		p->thread.cpu_context.x19 = (unsigned long)args->fn;
		p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;
	/*
	 * For the benefit of the unwinder, set up childregs->stackframe
	 * as the final frame for the new task.
	 */
	p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;

	ptrace_hw_copy_thread(p);

	return 0;
}
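
/*
 * Userspace counterpart of the CLONE_SETTLS path in copy_thread() above
 * (illustrative sketch using the glibc clone() wrapper; buffer names are
 * hypothetical):
 *
 *	static char tls_block[256];
 *	static char stack[16384];
 *
 *	clone(child_fn, stack + sizeof(stack), CLONE_VM | CLONE_SETTLS,
 *	      arg, NULL, tls_block, NULL);
 *
 * The TLS argument arrives here as args->tls, is stashed in
 * p->thread.uw.tp_value, and is written to the child's TPIDR_EL0 (or
 * TPIDRRO_EL0 for compat tasks) by tls_thread_switch() on first switch-in.
 */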

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
	if (system_supports_tpidr2() && !is_compat_task())
		current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (cpus_have_const_cap(ARM64_SSBS))
		return;

	spectre_v4_enable_task_mitigation(next);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 * Ensure access is disabled when switching to a 32bit task, ensure
 * access is enabled when switching to a 64bit task.
 */
static void erratum_1418040_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
	    !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
		return;

	if (is_compat_thread(task_thread_info(next)))
		sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
	else
		sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}

static void erratum_1418040_new_exec(void)
{
	preempt_disable();
	erratum_1418040_thread_switch(current);
	preempt_enable();
}

/*
 * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
 * this function must be called with preemption disabled and the update to
 * sctlr_user must be made in the same preemption disabled block so that
 * __switch_to() does not see the variable update before the SCTLR_EL1 one.
 */
void update_sctlr_el1(u64 sctlr)
{
	/*
	 * EnIA must not be cleared while in the kernel as this is necessary for
	 * in-kernel PAC. It will be cleared on kernel exit if needed.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);

	/* ISB required for the kernel uaccess routines when setting TCF0. */
	isb();
}
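
/*
 * Sketch of the calling pattern required by the comment above (illustrative;
 * 'new_sctlr' is a hypothetical value):
 *
 *	preempt_disable();
 *	current->thread.sctlr_user = new_sctlr;
 *	update_sctlr_el1(new_sctlr);
 *	preempt_enable();
 *
 * Updating both in one non-preemptible block guarantees that __switch_to()
 * can never observe the shadow variable ahead of the SCTLR_EL1 write.
 */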

/*
 * Thread switching.
 */
__notrace_funcgraph __sched
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	ssbs_thread_switch(next);
	erratum_1418040_thread_switch(next);
	ptrauth_thread_switch_user(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/*
	 * MTE thread switching must happen after the DSB above to ensure that
	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
	 * registers.
	 */
	mte_thread_switch(next);
	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (prev->thread.sctlr_user != next->thread.sctlr_user)
		update_sctlr_el1(next->thread.sctlr_user);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

struct wchan_info {
	unsigned long pc;
	int count;
};

static bool get_wchan_cb(void *arg, unsigned long pc)
{
	struct wchan_info *wchan_info = arg;

	if (!in_sched_functions(pc)) {
		wchan_info->pc = pc;
		return false;
	}
	return wchan_info->count++ < 16;
}

unsigned long __get_wchan(struct task_struct *p)
{
	struct wchan_info wchan_info = {
		.pc = 0,
		.count = 0,
	};

	if (!try_get_task_stack(p))
		return 0;

	arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);

	put_task_stack(p);

	return wchan_info.pc;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}

#ifdef CONFIG_COMPAT
int compat_elf_check_arch(const struct elf32_hdr *hdr)
{
	if (!system_supports_32bit_el0())
		return false;

	if ((hdr)->e_machine != EM_ARM)
		return false;

	if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
		return false;

	/*
	 * Prevent execve() of a 32-bit program from a deadline task
	 * if the restricted affinity mask would be inadmissible on an
	 * asymmetric system.
	 */
	return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       !dl_task_check_affinity(current, system_32bit_el0_cpumask());
}
#endif

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	unsigned long mmflags = 0;

	if (is_compat_task()) {
		mmflags = MMCF_AARCH32;

		/*
		 * Restrict the CPU affinity mask for a 32-bit task so that
		 * it contains only 32-bit-capable CPUs.
		 *
		 * From the perspective of the task, this looks similar to
		 * what would happen if the 64-bit-only CPUs were hot-unplugged
		 * at the point of execve(), although we try a bit harder to
		 * honour the cpuset hierarchy.
		 */
		if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
			force_compatible_cpus_allowed_ptr(current);
	} else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
		relax_compatible_cpus_allowed_ptr(current);
	}

	current->mm->context.flags = mmflags;
	ptrauth_thread_init_user();
	mte_thread_init_user();
	erratum_1418040_new_exec();

	if (task_spec_ssb_noexec(current)) {
		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
					 PR_SPEC_ENABLE);
	}
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (system_supports_mte())
		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
			      PR_MTE_TAG_MASK;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	if (set_mte_ctrl(task, arg) != 0)
		return -EINVAL;

	update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

long get_tagged_addr_ctrl(struct task_struct *task)
{
	long ret = 0;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
		ret = PR_TAGGED_ADDR_ENABLE;

	ret |= get_mte_ctrl(task);

	return ret;
}
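
/*
 * Userspace opts in to the tagged address ABI with prctl() (see
 * Documentation/arm64/tagged-address-abi.rst), e.g.:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * After a successful call, syscalls accept tagged pointers from this task.
 * When MTE is supported, the remaining arg bits accepted above select the
 * tag check fault mode (sync/async) and the tag exclusion mask, applied by
 * set_mte_ctrl().
 */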

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */

#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif
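
/*
 * Illustrative effect of arch_elf_adjust_prot(): for a BTI-annotated static
 * executable (or for the interpreter itself), an executable PT_LOAD segment
 * is mapped as if userspace had requested
 *
 *	mmap(addr, len, PROT_READ | PROT_EXEC | PROT_BTI,
 *	     MAP_PRIVATE | MAP_FIXED, fd, off);
 *
 * For the segments of a dynamically linked executable, the dynamic loader
 * is expected to make the equivalent mprotect(..., PROT_EXEC | PROT_BTI)
 * calls itself, as the comment above notes.
 */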