// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/rseq.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/gcs.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
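
/*
 * Illustrative use (not part of this file): kprobes-style code can
 * resolve a register by name and then fetch its value from a given
 * struct pt_regs *regs via the generic accessor in <asm/ptrace.h>:
 *
 *	int off = regs_query_register_offset("x0");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */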

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return READ_ONCE_NOCHECK(*addr);
	else
		return 0;
}
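
/*
 * Illustrative use (not part of this file): with the bounds check above, a
 * probe handler can safely peek at stacked values, e.g. the second entry
 * on the kernel stack:
 *
 *	unsigned long val = regs_get_kernel_stack_nth(regs, 1);
 *
 * An out-of-range @n simply yields 0 rather than a stray read.
 */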

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

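	/*
	 * An AArch32 tracee is told which slot fired via si_errno,
	 * mirroring the arch/arm ptrace numbering: (idx << 1) + 1,
	 * positive for a breakpoint and negative for a watchpoint.
	 */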
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}

	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

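/*
 * Pack the resource word reported to userspace as
 * (debug architecture << 8) | number of slots, matching the dbg_info
 * field of struct user_hwdebug_state.
 */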
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
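
/*
 * Illustrative ptracer usage (userspace, not part of this file): program
 * the first watchpoint slot through the regset ABI above. The ctrl word
 * follows decode_ctrl_reg() in <asm/hw_breakpoint.h>: enable in bit [0],
 * privilege in bits [2:1], load/store type in bits [4:3] and the
 * byte-address-select mask in bits [12:5]. watch_addr and watch_ctrl are
 * placeholders for values encoded accordingly.
 *
 *	struct user_hwdebug_state dbg = { 0 };
 *	struct iovec iov = {
 *		.iov_base = &dbg,
 *		.iov_len  = offsetof(struct user_hwdebug_state, dbg_regs) +
 *			    sizeof(dbg.dbg_regs[0]),
 *	};
 *
 *	dbg.dbg_regs[0].addr = watch_addr;
 *	dbg.dbg_regs[0].ctrl = watch_ctrl;
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_WATCH, &iov);
 */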
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}
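
/*
 * Illustrative ptracer usage (userspace, not part of this file): the GPR
 * regset is read and written with PTRACE_GETREGSET/PTRACE_SETREGSET and
 * an iovec, e.g. to rewrite x0 in a stopped tracee:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	regs.regs[0] = 0;
 *	ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
 */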

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	fpsimd_sync_from_effective_state(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	fpsimd_sync_from_effective_state(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	fpsimd_sync_to_effective_state_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

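/*
 * NT_ARM_TLS exposes two slots: TPIDR_EL0 and, where SME's TPIDR2 is
 * implemented, TPIDR2_EL0. The second slot reads as zero and is ignored
 * on write when TPIDR2 is not supported.
 */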
static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	int ret;

	if (target == current)
		tls_preserve_current_state();

	ret = membuf_store(&to, target->thread.uw.tp_value);
	if (system_supports_tpidr2())
		ret = membuf_store(&to, target->thread.tpidr2_el0);
	else
		ret = membuf_zero(&to, sizeof(u64));

	return ret;
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls[2];

	tls[0] = target->thread.uw.tp_value;
	if (system_supports_tpidr2())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls[0];
	if (system_supports_tpidr2())
		target->thread.tpidr2_el0 = tls[1];

	return ret;
}

static int fpmr_get(struct task_struct *target, const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_fpmr())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return membuf_store(&to, target->thread.uw.fpmr);
}

static int fpmr_set(struct task_struct *target, const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long fpmr;

	if (!system_supports_fpmr())
		return -EINVAL;

	fpmr = target->thread.uw.fpmr;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
	if (ret)
		return ret;

	target->thread.uw.fpmr = fpmr;

	fpsimd_flush_task_state(target);

	return 0;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
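
/*
 * Illustrative use (not part of this file): at a syscall-entry stop a
 * tracer can cancel the pending syscall by writing -1 through this
 * regset, which the kernel treats as "no syscall":
 *
 *	int nr = -1;
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */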

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	if (active && target->thread.fp_type == FP_STATE_SVE)
		header->flags = SVE_PT_REGS_SVE;
	else
		header->flags = SVE_PT_REGS_FPSIMD;

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	if (active)
		header->size = SVE_PT_SIZE(vq, header->flags);
	else
		header->size = sizeof(*header);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}
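
/*
 * NT_ARM_SVE/NT_ARM_SSVE payload layout produced and consumed below:
 * struct user_sve_header, followed by either the shared FPSIMD view
 * (struct user_fpsimd_state) or the full SVE view (Z0-Z31, P0-P15, FFR,
 * then FPSR/FPCR), padded out to a multiple of SVE_VQ_BYTES.
 */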

static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	/*
	 * When the requested vector type is not active, do not present data
	 * from the other mode to userspace.
	 */
	if (header.size == sizeof(header))
		return 0;

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		BUILD_BUG();
	}
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}

static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;
	bool fpsimd;

	fpsimd_flush_task_state(target);

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		return ret;

	/*
	 * Streaming SVE data is always stored and presented in SVE format.
	 * Require the user to provide SVE formatted data for consistency, and
	 * to avoid the risk that we configure the task into an invalid state.
	 */
	fpsimd = (header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD;
	if (fpsimd && type == ARM64_VEC_SME)
		return -EINVAL;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		return ret;

	/* Allocate SME storage if necessary, preserving any existing ZA/ZT state */
	if (type == ARM64_VEC_SME) {
		sme_alloc(target, false);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	/* Allocate SVE storage if necessary, zeroing any existing SVE state */
	if (!fpsimd) {
		sve_alloc(target, true);
		if (!target->thread.sve_state)
			return -ENOMEM;
	}

	/*
	 * Actual VL set may be different from what the user asked
	 * for, or we may have configured the _ONEXEC VL not the
	 * current VL:
	 */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			set_tsk_thread_flag(target, TIF_SVE);
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;
			set_tsk_thread_flag(target, TIF_SME);
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
	}

	/* Always zero V regs, FPSR, and FPCR */
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if (fpsimd) {
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		return ret;
	}

	/* Otherwise: no registers or full SVE case. */

	target->thread.fp_type = FP_STATE_SVE;

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl))
		return -EIO;

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end);

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

	return ret;
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int ssve_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}

static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.sme_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}

static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/*
	 * Actual VL set may be different from what the user asked
	 * for, or we may have configured the _ONEXEC rather than
	 * current VL:
	 */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Only flush the storage if PSTATE.ZA was not already set,
	 * otherwise preserve any existing data.
	 */
	sme_alloc(target, !thread_za_enabled(&target->thread));
	if (!target->thread.sme_state)
		return -ENOMEM;

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sme_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}

static int zt_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	if (!system_supports_sme2())
		return -EINVAL;

	/*
	 * If PSTATE.ZA is not set then ZT will be zeroed when it is
	 * enabled so report the current register value as zero.
	 */
	if (thread_za_enabled(&target->thread))
		membuf_write(&to, thread_zt_state(&target->thread),
			     ZT_SIG_REG_BYTES);
	else
		membuf_zero(&to, ZT_SIG_REG_BYTES);

	return 0;
}

static int zt_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_sme2())
		return -EINVAL;

	/* Ensure SVE storage in case this is first use of SME */
	sve_alloc(target, false);
	if (!target->thread.sve_state)
		return -ENOMEM;

	if (!thread_za_enabled(&target->thread)) {
		sme_alloc(target, true);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 thread_zt_state(&target->thread),
				 0, ZT_SIG_REG_BYTES);
	if (ret == 0) {
		target->thread.svcr |= SVCR_ZA_MASK;
		set_tsk_thread_flag(target, TIF_SME);
	}

	fpsimd_flush_task_state(target);

	return ret;
}

#endif /* CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
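/*
 * NT_ARM_TAGGED_ADDR_CTRL mirrors prctl(PR_SET_TAGGED_ADDR_CTRL)/
 * prctl(PR_GET_TAGGED_ADDR_CTRL), letting a ptracer inspect or adjust the
 * tracee's tagged address ABI (and, with MTE, its tag check settings).
 */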
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long ctrl;

	ctrl = get_tagged_addr_ctrl(target);
	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif

#ifdef CONFIG_ARM64_POE
static int poe_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_poe())
		return -EINVAL;

	return membuf_write(&to, &target->thread.por_el0,
			    sizeof(target->thread.por_el0));
}

static int poe_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	long ctrl;

	if (!system_supports_poe())
		return -EINVAL;

	ctrl = target->thread.por_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	target->thread.por_el0 = ctrl;

	return 0;
}
#endif

#ifdef CONFIG_ARM64_GCS
static void task_gcs_to_user(struct user_gcs *user_gcs,
			     const struct task_struct *target)
{
	user_gcs->features_enabled = target->thread.gcs_el0_mode;
	user_gcs->features_locked = target->thread.gcs_el0_locked;
	user_gcs->gcspr_el0 = target->thread.gcspr_el0;
}

static void task_gcs_from_user(struct task_struct *target,
			       const struct user_gcs *user_gcs)
{
	target->thread.gcs_el0_mode = user_gcs->features_enabled;
	target->thread.gcs_el0_locked = user_gcs->features_locked;
	target->thread.gcspr_el0 = user_gcs->gcspr_el0;
}

static int gcs_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_gcs user_gcs;

	if (!system_supports_gcs())
		return -EINVAL;

	if (target == current)
		gcs_preserve_current_state();

	task_gcs_to_user(&user_gcs, target);

	return membuf_write(&to, &user_gcs, sizeof(user_gcs));
}

static int gcs_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_gcs user_gcs;

	if (!system_supports_gcs())
		return -EINVAL;

	task_gcs_to_user(&user_gcs, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1);
	if (ret)
		return ret;

	if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	task_gcs_from_user(target, &user_gcs);

	return 0;
}
#endif

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_FPMR,
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
	REGSET_ZT,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
#ifdef CONFIG_ARM64_POE
	REGSET_POE,
#endif
#ifdef CONFIG_ARM64_GCS
	REGSET_GCS,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 2,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
	[REGSET_FPMR] = {
		.core_note_type = NT_ARM_FPMR,
		.n = 1,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = fpmr_get,
		.set = fpmr_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
					      SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
	[REGSET_ZT] = { /* SME ZT */
		.core_note_type = NT_ARM_ZT,
		.n = 1,
		.size = ZT_SIG_REG_BYTES,
		.align = sizeof(u64),
		.regset_get = zt_get,
		.set = zt_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
#ifdef CONFIG_ARM64_POE
	[REGSET_POE] = {
		.core_note_type = NT_ARM_POE,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = poe_get,
		.set = poe_set,
	},
#endif
#ifdef CONFIG_ARM64_GCS
	[REGSET_GCS] = {
		.core_note_type = NT_ARM_GCS,
		.n = sizeof(struct user_gcs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gcs_get,
		.set = gcs_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

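	/* AArch32 r0-r14 live in regs[0-14]; 15 = PC, 16 = CPSR, 17 = ORIG_r0 */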
	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
1816 start = pos / regset->size;
1817
1818 if (start + num_regs > regset->n)
1819 return -EIO;
1820
1821 newregs = *task_pt_regs(target);
1822
1823 for (i = 0; i < num_regs; ++i) {
1824 unsigned int idx = start + i;
1825 compat_ulong_t reg;
1826
1827 if (kbuf) {
1828 memcpy(®, kbuf, sizeof(reg));
1829 kbuf += sizeof(reg);
1830 } else {
1831 ret = copy_from_user(®, ubuf, sizeof(reg));
1832 if (ret) {
1833 ret = -EFAULT;
1834 break;
1835 }
1836
1837 ubuf += sizeof(reg);
1838 }
1839
1840 switch (idx) {
1841 case 15:
1842 newregs.pc = reg;
1843 break;
1844 case 16:
1845 reg = compat_psr_to_pstate(reg);
1846 newregs.pstate = reg;
1847 break;
1848 case 17:
1849 newregs.orig_x0 = reg;
1850 break;
1851 default:
1852 newregs.regs[idx] = reg;
1853 }
1854
1855 }
1856
1857 if (valid_user_regs(&newregs.user_regs, target))
1858 *task_pt_regs(target) = newregs;
1859 else
1860 ret = -EINVAL;
1861
1862 return ret;
1863 }
1864
compat_vfp_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1865 static int compat_vfp_get(struct task_struct *target,
1866 const struct user_regset *regset,
1867 struct membuf to)
1868 {
1869 struct user_fpsimd_state *uregs;
1870 compat_ulong_t fpscr;
1871
1872 if (!system_supports_fpsimd())
1873 return -EINVAL;
1874
1875 uregs = &target->thread.uw.fpsimd_state;
1876
1877 if (target == current)
1878 fpsimd_preserve_current_state();
1879
1880 /*
1881 * The VFP registers are packed into the fpsimd_state, so they all sit
1882 * nicely together for us. We just need to create the fpscr separately.
1883 */
1884 membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
1885 fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
1886 (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
1887 return membuf_store(&to, fpscr);
1888 }
1889
compat_vfp_set(struct task_struct * target,const struct user_regset * regset,unsigned int pos,unsigned int count,const void * kbuf,const void __user * ubuf)1890 static int compat_vfp_set(struct task_struct *target,
1891 const struct user_regset *regset,
1892 unsigned int pos, unsigned int count,
1893 const void *kbuf, const void __user *ubuf)
1894 {
1895 struct user_fpsimd_state *uregs;
1896 compat_ulong_t fpscr;
1897 int ret, vregs_end_pos;
1898
1899 if (!system_supports_fpsimd())
1900 return -EINVAL;
1901
1902 uregs = &target->thread.uw.fpsimd_state;
1903
1904 vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
1905 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
1906 vregs_end_pos);
1907
1908 if (count && !ret) {
1909 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
1910 vregs_end_pos, VFP_STATE_SIZE);
1911 if (!ret) {
1912 uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
1913 uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
1914 }
1915 }
1916
1917 fpsimd_flush_task_state(target);
1918 return ret;
1919 }
1920
compat_tls_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1921 static int compat_tls_get(struct task_struct *target,
1922 const struct user_regset *regset,
1923 struct membuf to)
1924 {
1925 return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
1926 }
1927
compat_tls_set(struct task_struct * target,const struct user_regset * regset,unsigned int pos,unsigned int count,const void * kbuf,const void __user * ubuf)1928 static int compat_tls_set(struct task_struct *target,
1929 const struct user_regset *regset, unsigned int pos,
1930 unsigned int count, const void *kbuf,
1931 const void __user *ubuf)
1932 {
1933 int ret;
1934 compat_ulong_t tls = target->thread.uw.tp_value;
1935
1936 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1937 if (ret)
1938 return ret;
1939
1940 target->thread.uw.tp_value = tls;
1941 return ret;
1942 }
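
/*
 * From a 64-bit tracer, the TLS value of a 32-bit tracee is reachable
 * through NT_ARM_TLS in the extended ptrace view below; a hedged
 * userspace sketch:
 *
 *	uint32_t tls;
 *	struct iovec iov = { .iov_base = &tls, .iov_len = sizeof(tls) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_TLS, &iov);
 */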

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

#ifdef CONFIG_COMPAT
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:
		newregs.pc = val;
		break;
	case 16:
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
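
/*
 * For example, under the scheme above: num 1 and -1 select slot 0's
 * address, 2 and -2 slot 0's control, 3 and -3 slot 1's address, and so
 * on; odd numbers name the address half of a pair, even numbers the
 * control half (see compat_ptrace_hbp_get/set below).
 */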

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}
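
/*
 * The resource-information word built above is laid out as:
 *
 *	[31:24]	debug architecture version (debug_monitors_arch())
 *	[23:16]	maximum watchpoint length in bytes (8)
 *	[15: 8]	number of watchpoint slots
 *	[ 7: 0]	number of breakpoint slots
 */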

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
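
/*
 * A minimal 32-bit tracer-side sketch of the numbering handled above
 * (userspace, illustrative only: the address value is an assumption and
 * the control encoding is elided; PTRACE_GETHBPREGS/PTRACE_SETHBPREGS
 * are the arm32 spellings of the compat requests):
 *
 *	unsigned int info, addr = 0x10400, ctrl = 0;
 *
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info);	// resource info
 *	ptrace(PTRACE_SETHBPREGS, pid, (void *)1, &addr);	// bp 0 address
 *	ptrace(PTRACE_SETHBPREGS, pid, (void *)2, &ctrl);	// bp 0 control
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)-1, &addr);	// wp 0 address
 */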
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
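
/*
 * For example, a 32-bit tracer rewriting the syscall about to run would
 * reach the COMPAT_PTRACE_SET_SYSCALL case above with (sketch;
 * PTRACE_SET_SYSCALL is the arm32 userspace spelling):
 *
 *	ptrace(PTRACE_SET_SYSCALL, pid, 0, __NR_getpid);
 */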
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;

	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}
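
/*
 * A hedged userspace sketch of the MTE tag requests forwarded above:
 * each tag byte covers one 16-byte granule of tagged memory, and "addr"
 * is assumed to be a suitably tagged address in the tracee:
 *
 *	unsigned char tags[16];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *
 *	ptrace(PTRACE_PEEKMTETAGS, pid, (void *)addr, &iov);
 */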

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}
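
/*
 * A tracer can therefore tell the two stop types apart with a sketch
 * like this (userspace, assuming a stopped 64-bit tracee; 0 and 1
 * mirror the ptrace_syscall_dir values above):
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *	bool is_entry = (uregs.regs[7] == 0);	// PTRACE_SYSCALL_ENTER
 */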

int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}