// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <linux/stacktrace.h>

#include <asm/alternative.h>
#include <asm/arch_timer.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/gcs.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/system_misc.h>

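/*
 * Without per-task stack canaries, a single global guard value is shared by
 * all tasks; it is exported so that module code built with the stack
 * protector can reference it.
 */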
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve
 * this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	do_kernel_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

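/*
 * Map PSTATE.BTYPE values to the two-character strings used in the pstate
 * dump ('j' = jump, 'c' = call, '-' = not pending).
 */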
#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
		       pstate,
		       pstate & PSR_AA32_N_BIT ? 'N' : 'n',
		       pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_AA32_C_BIT ? 'C' : 'c',
		       pstate & PSR_AA32_V_BIT ? 'V' : 'v',
		       pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
		       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
		       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
		       pstate & PSR_AA32_A_BIT ? 'A' : 'a',
		       pstate & PSR_AA32_I_BIT ? 'I' : 'i',
		       pstate & PSR_AA32_F_BIT ? 'F' : 'f',
		       pstate & PSR_AA32_DIT_BIT ? '+' : '-',
		       pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
		       pstate,
		       pstate & PSR_N_BIT ? 'N' : 'n',
		       pstate & PSR_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_C_BIT ? 'C' : 'c',
		       pstate & PSR_V_BIT ? 'V' : 'v',
		       pstate & PSR_D_BIT ? 'D' : 'd',
		       pstate & PSR_A_BIT ? 'A' : 'a',
		       pstate & PSR_I_BIT ? 'I' : 'i',
		       pstate & PSR_F_BIT ? 'F' : 'f',
		       pstate & PSR_PAN_BIT ? '+' : '-',
		       pstate & PSR_UAO_BIT ? '+' : '-',
		       pstate & PSR_TCO_BIT ? '+' : '-',
		       pstate & PSR_DIT_BIT ? '+' : '-',
		       pstate & PSR_SSBS_BIT ? '+' : '-',
		       btype_str);
	}
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr: %08x\n", regs->pmr);

	i = top_reg;

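	/*
	 * Dump the general purpose registers three per line, from x<top_reg>
	 * down to x0; the inner loop's post-decrement means each pr_cont()
	 * prints the freshly decremented index.
	 */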
	while (i >= 0) {
		printk("x%-2d: %016llx", i, regs->regs[i]);

		while (i-- % 3)
			pr_cont(" x%-2d: %016llx", i, regs->regs[i]);

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(0, SYS_TPIDR2_EL0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

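/* On exec, reset the permission overlay register to its default value. */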
static void flush_poe(void)
{
	if (!system_supports_poe())
		return;

	write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

#ifdef CONFIG_ARM64_GCS

static void flush_gcs(void)
{
	if (!system_supports_gcs())
		return;

	current->thread.gcspr_el0 = 0;
	current->thread.gcs_base = 0;
	current->thread.gcs_size = 0;
	current->thread.gcs_el0_mode = 0;
	write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1);
	write_sysreg_s(0, SYS_GCSPR_EL0);
}

static int copy_thread_gcs(struct task_struct *p,
			   const struct kernel_clone_args *args)
{
	unsigned long gcs;

	if (!system_supports_gcs())
		return 0;

	p->thread.gcs_base = 0;
	p->thread.gcs_size = 0;

	gcs = gcs_alloc_thread_stack(p, args);
	if (IS_ERR_VALUE(gcs))
		return PTR_ERR((void *)gcs);

	p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
	p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;

	return 0;
}

#else

static void flush_gcs(void) { }
static int copy_thread_gcs(struct task_struct *p,
			   const struct kernel_clone_args *args)
{
	return 0;
}

#endif

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
	flush_poe();
	flush_gcs();
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
	gcs_free(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * The current/src task's FPSIMD state may or may not be live, and may
	 * have been altered by ptrace after entry to the kernel. Save the
	 * effective FPSIMD state so that this will be copied into dst.
	 */
	fpsimd_save_and_flush_current_state();
	fpsimd_sync_from_effective_state(src);

	*dst = *src;

	/*
	 * Drop stale reference to src's sve_state and convert dst to
	 * non-streaming FPSIMD mode.
	 */
	dst->thread.fp_type = FP_STATE_FPSIMD;
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);
	task_smstop_sm(dst);

	/*
	 * Drop stale reference to src's sme_state and ensure dst has ZA
	 * disabled.
	 *
	 * When necessary, ZA will be inherited later in copy_thread_za().
	 */
	dst->thread.sme_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SME);
	dst->thread.svcr &= ~SVCR_ZA_MASK;

	/* clear any pending asynchronous tag fault raised by the parent */
	clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

	return 0;
}

static int copy_thread_za(struct task_struct *dst, struct task_struct *src)
{
	if (!thread_za_enabled(&src->thread))
		return 0;

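	/*
	 * A task with ZA enabled also needs a (zeroed) sve_state buffer as
	 * storage for streaming-mode vector state, plus a copy of the
	 * parent's sme_state, which holds ZA itself.
	 */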
	dst->thread.sve_state = kzalloc(sve_state_size(src),
					GFP_KERNEL);
	if (!dst->thread.sve_state)
		return -ENOMEM;

	dst->thread.sme_state = kmemdup(src->thread.sme_state,
					sme_state_size(src),
					GFP_KERNEL);
	if (!dst->thread.sme_state) {
		kfree(dst->thread.sve_state);
		dst->thread.sve_state = NULL;
		return -ENOMEM;
	}

	set_tsk_thread_flag(dst, TIF_SME);
	dst->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

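/*
 * Assembly entry point for newly created tasks; for a kthread it calls the
 * function stashed in cpu_context.x19 with the argument from x20 (set up by
 * copy_thread() below).
 */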
asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long stack_start = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);
	int ret;

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!args->fn)) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (system_supports_poe())
			p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * Due to the AAPCS64 "ZA lazy saving scheme", PSTATE.ZA and
		 * TPIDR2 need to be manipulated as a pair, and either both
		 * need to be inherited or both need to be reset.
		 *
		 * Within a process, child threads must not inherit their
		 * parent's TPIDR2 value or they may clobber their parent's
		 * stack at some later point.
		 *
		 * When a process is fork()'d, the child must inherit ZA and
		 * TPIDR2 from its parent in case there was dormant ZA state.
		 *
		 * Use CLONE_VM to determine when the child will share the
		 * address space with the parent, and cannot safely inherit the
		 * state.
		 */
		if (system_supports_sme()) {
			if (!(clone_flags & CLONE_VM)) {
				p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
				ret = copy_thread_za(p, current);
				if (ret)
					return ret;
			} else {
				p->thread.tpidr2_el0 = 0;
				WARN_ON_ONCE(p->thread.svcr & SVCR_ZA_MASK);
			}
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.uw.tp_value = tls;

		ret = copy_thread_gcs(p, args);
		if (ret != 0)
			return ret;
	} else {
		/*
		 * A kthread has no context to ERET to, so ensure any buggy
		 * ERET is treated as an illegal exception return.
		 *
		 * When a user task is created from a kthread, childregs will
		 * be initialized by start_thread() or start_compat_thread().
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
		childregs->stackframe.type = FRAME_META_TYPE_FINAL;

		p->thread.cpu_context.x19 = (unsigned long)args->fn;
		p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;

		if (system_supports_poe())
			p->thread.por_el0 = POR_EL0_INIT;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;
	/*
	 * For the benefit of the unwinder, set up childregs->stackframe
	 * as the final frame for the new task.
	 */
	p->thread.cpu_context.fp = (unsigned long)&childregs->stackframe;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
	if (system_supports_tpidr2() && !is_compat_task())
		current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	/*
	 * Nothing to do for kernel threads, but their pt_regs may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (alternative_has_cap_unlikely(ARM64_SSBS))
		return;

	spectre_v4_enable_task_mitigation(next);
}


/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

#ifdef CONFIG_ARM64_GCS

void gcs_preserve_current_state(void)
{
	current->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
}

static void gcs_thread_switch(struct task_struct *next)
{
	if (!system_supports_gcs())
		return;

	/* GCSPR_EL0 is always readable */
	gcs_preserve_current_state();
	write_sysreg_s(next->thread.gcspr_el0, SYS_GCSPR_EL0);

	if (current->thread.gcs_el0_mode != next->thread.gcs_el0_mode)
		gcs_set_el0_mode(next);

	/*
	 * Ensure that GCS memory effects of the 'prev' thread are
	 * ordered before other memory accesses with release semantics
	 * (or preceded by a DMB) on the current PE. In addition, any
	 * memory accesses with acquire semantics (or succeeded by a
	 * DMB) are ordered before GCS memory effects of the 'next'
	 * thread. This will ensure that the GCS memory effects are
	 * visible to other PEs in case of migration.
	 */
	if (task_gcs_el0_enabled(current) || task_gcs_el0_enabled(next))
		gcsb_dsync();
}

#else

static void gcs_thread_switch(struct task_struct *next)
{
}

#endif

/*
 * Handle sysreg updates for ARM erratum 1418040 which affects the 32bit view of
 * CNTVCT, various other errata which require trapping all CNTVCT{,_EL0}
 * accesses and prctl(PR_SET_TSC). Ensure access is disabled iff a workaround is
 * required or PR_TSC_SIGSEGV is set.
 */
static void update_cntkctl_el1(struct task_struct *next)
{
	struct thread_info *ti = task_thread_info(next);

	if (test_ti_thread_flag(ti, TIF_TSC_SIGSEGV) ||
	    has_erratum_handler(read_cntvct_el0) ||
	    (IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
	     this_cpu_has_cap(ARM64_WORKAROUND_1418040) &&
	     is_compat_thread(ti)))
		sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
	else
		sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}

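/*
 * Only rewrite CNTKCTL_EL1 when the thread flags that feed
 * update_cntkctl_el1() (32-bitness and PR_TSC_SIGSEGV) differ between the
 * outgoing and incoming tasks.
 */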
static void cntkctl_thread_switch(struct task_struct *prev,
				  struct task_struct *next)
{
	if ((read_ti_thread_flags(task_thread_info(prev)) &
	     (_TIF_32BIT | _TIF_TSC_SIGSEGV)) !=
	    (read_ti_thread_flags(task_thread_info(next)) &
	     (_TIF_32BIT | _TIF_TSC_SIGSEGV)))
		update_cntkctl_el1(next);
}

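/*
 * Common helper behind the PR_SET_TSC prctl(): e.g.
 * prctl(PR_SET_TSC, PR_TSC_SIGSEGV) makes subsequent userspace reads of the
 * virtual counter raise SIGSEGV, while PR_TSC_ENABLE restores direct access.
 */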
static int do_set_tsc_mode(unsigned int val)
{
	bool tsc_sigsegv;

	if (val == PR_TSC_SIGSEGV)
		tsc_sigsegv = true;
	else if (val == PR_TSC_ENABLE)
		tsc_sigsegv = false;
	else
		return -EINVAL;

	preempt_disable();
	update_thread_flag(TIF_TSC_SIGSEGV, tsc_sigsegv);
	update_cntkctl_el1(current);
	preempt_enable();

	return 0;
}

static void permission_overlay_switch(struct task_struct *next)
{
	if (!system_supports_poe())
		return;

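	/* POR_EL0 is user-writable, so preserve the outgoing task's value. */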
	current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
	if (current->thread.por_el0 != next->thread.por_el0)
		write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
}

/*
 * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
 * this function must be called with preemption disabled and the update to
 * sctlr_user must be made in the same preemption disabled block so that
 * __switch_to() does not see the variable update before the SCTLR_EL1 one.
 */
void update_sctlr_el1(u64 sctlr)
{
	/*
	 * EnIA must not be cleared while in the kernel as this is necessary for
	 * in-kernel PAC. It will be cleared on kernel exit if needed.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);

	/* ISB required for the kernel uaccess routines when setting TCF0. */
	isb();
}

/*
 * Thread switching.
 */
__notrace_funcgraph __sched
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	ssbs_thread_switch(next);
	cntkctl_thread_switch(prev, next);
	ptrauth_thread_switch_user(next);
	permission_overlay_switch(next);
	gcs_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case the
	 * thread migrates to a different CPU. This full barrier is also
	 * required by the membarrier system call. Additionally it makes any
	 * in-progress pgtable writes visible to the table walker; See
	 * emit_pte_barriers().
	 */
	dsb(ish);

	/*
	 * MTE thread switching must happen after the DSB above to ensure that
	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
	 * registers.
	 */
	mte_thread_switch(next);
	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (prev->thread.sctlr_user != next->thread.sctlr_user)
		update_sctlr_el1(next->thread.sctlr_user);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

struct wchan_info {
	unsigned long pc;
	int count;
};

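/*
 * Stack-walk callback for __get_wchan(): record the first PC outside the
 * scheduler and stop, giving up after 16 frames.
 */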
static bool get_wchan_cb(void *arg, unsigned long pc)
{
	struct wchan_info *wchan_info = arg;

	if (!in_sched_functions(pc)) {
		wchan_info->pc = pc;
		return false;
	}
	return wchan_info->count++ < 16;
}

unsigned long __get_wchan(struct task_struct *p)
{
	struct wchan_info wchan_info = {
		.pc = 0,
		.count = 0,
	};

	if (!try_get_task_stack(p))
		return 0;

	arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);

	put_task_stack(p);

	return wchan_info.pc;
}

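/*
 * Randomise the initial stack pointer downwards by up to one page, keeping
 * the 16-byte alignment that AAPCS64 requires of SP.
 */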
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}

#ifdef CONFIG_COMPAT
int compat_elf_check_arch(const struct elf32_hdr *hdr)
{
	if (!system_supports_32bit_el0())
		return false;

	if ((hdr)->e_machine != EM_ARM)
		return false;

	if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
		return false;

	/*
	 * Prevent execve() of a 32-bit program from a deadline task
	 * if the restricted affinity mask would be inadmissible on an
	 * asymmetric system.
	 */
	return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       !dl_task_check_affinity(current, system_32bit_el0_cpumask());
}
#endif

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	unsigned long mmflags = 0;

	if (is_compat_task()) {
		mmflags = MMCF_AARCH32;

		/*
		 * Restrict the CPU affinity mask for a 32-bit task so that
		 * it contains only 32-bit-capable CPUs.
		 *
		 * From the perspective of the task, this looks similar to
		 * what would happen if the 64-bit-only CPUs were hot-unplugged
		 * at the point of execve(), although we try a bit harder to
		 * honour the cpuset hierarchy.
		 */
		if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
			force_compatible_cpus_allowed_ptr(current);
	} else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
		relax_compatible_cpus_allowed_ptr(current);
	}

	current->mm->context.flags = mmflags;
	ptrauth_thread_init_user();
	mte_thread_init_user();
	do_set_tsc_mode(PR_TSC_ENABLE);

	if (task_spec_ssb_noexec(current)) {
		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
					 PR_SPEC_ENABLE);
	}
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (system_supports_mte())
		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
			      PR_MTE_TAG_MASK;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	if (set_mte_ctrl(task, arg) != 0)
		return -EINVAL;

	update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

long get_tagged_addr_ctrl(struct task_struct *task)
{
	long ret = 0;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
		ret = PR_TAGGED_ADDR_ENABLE;

	ret |= get_mte_ctrl(task);

	return ret;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

static const struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */

#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif

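/*
 * Back-end for the PR_GET_TSC prctl(): report whether userspace counter
 * reads are allowed (PR_TSC_ENABLE) or trapped (PR_TSC_SIGSEGV).
 */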
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (is_compat_task())
		return -EINVAL;

	if (test_thread_flag(TIF_TSC_SIGSEGV))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (is_compat_task())
		return -EINVAL;

	return do_set_tsc_mode(val);
}