xref: /linux/arch/arm64/kernel/ptrace.c (revision 56d06fa29edd58c448766014afd833b7ff51247b)
1 /*
2  * Based on arch/arm/kernel/ptrace.c
3  *
4  * By Ross Biro 1/23/92
5  * edited by Linus Torvalds
6  * ARM modifications Copyright (C) 2000 Russell King
7  * Copyright (C) 2012 ARM Ltd.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
21 
22 #include <linux/audit.h>
23 #include <linux/compat.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/mm.h>
27 #include <linux/smp.h>
28 #include <linux/ptrace.h>
29 #include <linux/user.h>
30 #include <linux/seccomp.h>
31 #include <linux/security.h>
32 #include <linux/init.h>
33 #include <linux/signal.h>
34 #include <linux/uaccess.h>
35 #include <linux/perf_event.h>
36 #include <linux/hw_breakpoint.h>
37 #include <linux/regset.h>
38 #include <linux/tracehook.h>
39 #include <linux/elf.h>
40 
41 #include <asm/compat.h>
42 #include <asm/debug-monitors.h>
43 #include <asm/pgtable.h>
44 #include <asm/syscall.h>
45 #include <asm/traps.h>
46 #include <asm/system_misc.h>
47 
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/syscalls.h>
50 
51 /*
52  * TODO: does not yet catch signals sent when the child dies
53  * in exit.c or in signal.c.
54  */
55 
56 /*
57  * Called by kernel/ptrace.c when detaching.
58  */
59 void ptrace_disable(struct task_struct *child)
60 {
61 	/*
62 	 * This would be better off in core code, but PTRACE_DETACH has
63 	 * grown its fair share of arch-specific warts and changing it
64 	 * is likely to cause regressions on obscure architectures.
65 	 */
66 	user_disable_single_step(child);
67 }
68 
69 #ifdef CONFIG_HAVE_HW_BREAKPOINT
70 /*
71  * Handle hitting a HW-breakpoint.
72  */
73 static void ptrace_hbptriggered(struct perf_event *bp,
74 				struct perf_sample_data *data,
75 				struct pt_regs *regs)
76 {
77 	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
78 	siginfo_t info = {
79 		.si_signo	= SIGTRAP,
80 		.si_errno	= 0,
81 		.si_code	= TRAP_HWBKPT,
82 		.si_addr	= (void __user *)(bkpt->trigger),
83 	};
84 
85 #ifdef CONFIG_COMPAT
86 	int i;
87 
88 	if (!is_compat_task())
89 		goto send_sig;
90 
91 	for (i = 0; i < ARM_MAX_BRP; ++i) {
92 		if (current->thread.debug.hbp_break[i] == bp) {
93 			info.si_errno = (i << 1) + 1;
94 			break;
95 		}
96 	}
97 
98 	for (i = 0; i < ARM_MAX_WRP; ++i) {
99 		if (current->thread.debug.hbp_watch[i] == bp) {
100 			info.si_errno = -((i << 1) + 1);
101 			break;
102 		}
103 	}
104 
105 send_sig:
106 #endif
107 	force_sig_info(SIGTRAP, &info, current);
108 }
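/*
 * For a 32-bit tracee, the si_errno value set above encodes which slot
 * fired: positive values number breakpoints and negative values number
 * watchpoints, using the same virtual register numbering that
 * compat_ptrace_hbp_num_to_idx() decodes further down in this file.
 */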
109 
110 /*
111  * Unregister breakpoints from this task and reset the pointers in
112  * the thread_struct.
113  */
114 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
115 {
116 	int i;
117 	struct thread_struct *t = &tsk->thread;
118 
119 	for (i = 0; i < ARM_MAX_BRP; i++) {
120 		if (t->debug.hbp_break[i]) {
121 			unregister_hw_breakpoint(t->debug.hbp_break[i]);
122 			t->debug.hbp_break[i] = NULL;
123 		}
124 	}
125 
126 	for (i = 0; i < ARM_MAX_WRP; i++) {
127 		if (t->debug.hbp_watch[i]) {
128 			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
129 			t->debug.hbp_watch[i] = NULL;
130 		}
131 	}
132 }
133 
134 void ptrace_hw_copy_thread(struct task_struct *tsk)
135 {
136 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
137 }
138 
139 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
140 					       struct task_struct *tsk,
141 					       unsigned long idx)
142 {
143 	struct perf_event *bp = ERR_PTR(-EINVAL);
144 
145 	switch (note_type) {
146 	case NT_ARM_HW_BREAK:
147 		if (idx < ARM_MAX_BRP)
148 			bp = tsk->thread.debug.hbp_break[idx];
149 		break;
150 	case NT_ARM_HW_WATCH:
151 		if (idx < ARM_MAX_WRP)
152 			bp = tsk->thread.debug.hbp_watch[idx];
153 		break;
154 	}
155 
156 	return bp;
157 }
158 
159 static int ptrace_hbp_set_event(unsigned int note_type,
160 				struct task_struct *tsk,
161 				unsigned long idx,
162 				struct perf_event *bp)
163 {
164 	int err = -EINVAL;
165 
166 	switch (note_type) {
167 	case NT_ARM_HW_BREAK:
168 		if (idx < ARM_MAX_BRP) {
169 			tsk->thread.debug.hbp_break[idx] = bp;
170 			err = 0;
171 		}
172 		break;
173 	case NT_ARM_HW_WATCH:
174 		if (idx < ARM_MAX_WRP) {
175 			tsk->thread.debug.hbp_watch[idx] = bp;
176 			err = 0;
177 		}
178 		break;
179 	}
180 
181 	return err;
182 }
183 
184 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
185 					    struct task_struct *tsk,
186 					    unsigned long idx)
187 {
188 	struct perf_event *bp;
189 	struct perf_event_attr attr;
190 	int err, type;
191 
192 	switch (note_type) {
193 	case NT_ARM_HW_BREAK:
194 		type = HW_BREAKPOINT_X;
195 		break;
196 	case NT_ARM_HW_WATCH:
197 		type = HW_BREAKPOINT_RW;
198 		break;
199 	default:
200 		return ERR_PTR(-EINVAL);
201 	}
202 
203 	ptrace_breakpoint_init(&attr);
204 
205 	/*
206 	 * Initialise fields to sane defaults
207 	 * (i.e. values that will pass validation).
208 	 */
209 	attr.bp_addr	= 0;
210 	attr.bp_len	= HW_BREAKPOINT_LEN_4;
211 	attr.bp_type	= type;
212 	attr.disabled	= 1;
213 
214 	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
215 	if (IS_ERR(bp))
216 		return bp;
217 
218 	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
219 	if (err)
220 		return ERR_PTR(err);
221 
222 	return bp;
223 }
224 
225 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
226 				     struct arch_hw_breakpoint_ctrl ctrl,
227 				     struct perf_event_attr *attr)
228 {
229 	int err, len, type, disabled = !ctrl.enabled;
230 
231 	attr->disabled = disabled;
232 	if (disabled)
233 		return 0;
234 
235 	err = arch_bp_generic_fields(ctrl, &len, &type);
236 	if (err)
237 		return err;
238 
239 	switch (note_type) {
240 	case NT_ARM_HW_BREAK:
241 		if ((type & HW_BREAKPOINT_X) != type)
242 			return -EINVAL;
243 		break;
244 	case NT_ARM_HW_WATCH:
245 		if ((type & HW_BREAKPOINT_RW) != type)
246 			return -EINVAL;
247 		break;
248 	default:
249 		return -EINVAL;
250 	}
251 
252 	attr->bp_len	= len;
253 	attr->bp_type	= type;
254 
255 	return 0;
256 }
257 
258 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
259 {
260 	u8 num;
261 	u32 reg = 0;
262 
263 	switch (note_type) {
264 	case NT_ARM_HW_BREAK:
265 		num = hw_breakpoint_slots(TYPE_INST);
266 		break;
267 	case NT_ARM_HW_WATCH:
268 		num = hw_breakpoint_slots(TYPE_DATA);
269 		break;
270 	default:
271 		return -EINVAL;
272 	}
273 
274 	reg |= debug_monitors_arch();
275 	reg <<= 8;
276 	reg |= num;
277 
278 	*info = reg;
279 	return 0;
280 }
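/*
 * The resource word built above packs the debug architecture version
 * into bits [15:8] and the number of available slots into bits [7:0],
 * i.e. info == (debug_monitors_arch() << 8) | num.
 */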
281 
282 static int ptrace_hbp_get_ctrl(unsigned int note_type,
283 			       struct task_struct *tsk,
284 			       unsigned long idx,
285 			       u32 *ctrl)
286 {
287 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
288 
289 	if (IS_ERR(bp))
290 		return PTR_ERR(bp);
291 
292 	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
293 	return 0;
294 }
295 
296 static int ptrace_hbp_get_addr(unsigned int note_type,
297 			       struct task_struct *tsk,
298 			       unsigned long idx,
299 			       u64 *addr)
300 {
301 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
302 
303 	if (IS_ERR(bp))
304 		return PTR_ERR(bp);
305 
306 	*addr = bp ? bp->attr.bp_addr : 0;
307 	return 0;
308 }
309 
310 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
311 							struct task_struct *tsk,
312 							unsigned long idx)
313 {
314 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
315 
316 	if (!bp)
317 		bp = ptrace_hbp_create(note_type, tsk, idx);
318 
319 	return bp;
320 }
321 
322 static int ptrace_hbp_set_ctrl(unsigned int note_type,
323 			       struct task_struct *tsk,
324 			       unsigned long idx,
325 			       u32 uctrl)
326 {
327 	int err;
328 	struct perf_event *bp;
329 	struct perf_event_attr attr;
330 	struct arch_hw_breakpoint_ctrl ctrl;
331 
332 	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
333 	if (IS_ERR(bp)) {
334 		err = PTR_ERR(bp);
335 		return err;
336 	}
337 
338 	attr = bp->attr;
339 	decode_ctrl_reg(uctrl, &ctrl);
340 	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
341 	if (err)
342 		return err;
343 
344 	return modify_user_hw_breakpoint(bp, &attr);
345 }
346 
347 static int ptrace_hbp_set_addr(unsigned int note_type,
348 			       struct task_struct *tsk,
349 			       unsigned long idx,
350 			       u64 addr)
351 {
352 	int err;
353 	struct perf_event *bp;
354 	struct perf_event_attr attr;
355 
356 	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
357 	if (IS_ERR(bp)) {
358 		err = PTR_ERR(bp);
359 		return err;
360 	}
361 
362 	attr = bp->attr;
363 	attr.bp_addr = addr;
364 	err = modify_user_hw_breakpoint(bp, &attr);
365 	return err;
366 }
367 
368 #define PTRACE_HBP_ADDR_SZ	sizeof(u64)
369 #define PTRACE_HBP_CTRL_SZ	sizeof(u32)
370 #define PTRACE_HBP_PAD_SZ	sizeof(u32)
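/*
 * These sizes follow the layout walked by hw_break_get()/hw_break_set()
 * below: struct user_hwdebug_state starts with a 32-bit resource-info
 * word plus a 32-bit pad, followed by one (u64 address, u32 ctrl,
 * u32 pad) entry per breakpoint or watchpoint slot.
 */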
371 
372 static int hw_break_get(struct task_struct *target,
373 			const struct user_regset *regset,
374 			unsigned int pos, unsigned int count,
375 			void *kbuf, void __user *ubuf)
376 {
377 	unsigned int note_type = regset->core_note_type;
378 	int ret, idx = 0, offset, limit;
379 	u32 info, ctrl;
380 	u64 addr;
381 
382 	/* Resource info */
383 	ret = ptrace_hbp_get_resource_info(note_type, &info);
384 	if (ret)
385 		return ret;
386 
387 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
388 				  sizeof(info));
389 	if (ret)
390 		return ret;
391 
392 	/* Pad */
393 	offset = offsetof(struct user_hwdebug_state, pad);
394 	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
395 				       offset + PTRACE_HBP_PAD_SZ);
396 	if (ret)
397 		return ret;
398 
399 	/* (address, ctrl) registers */
400 	offset = offsetof(struct user_hwdebug_state, dbg_regs);
401 	limit = regset->n * regset->size;
402 	while (count && offset < limit) {
403 		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
404 		if (ret)
405 			return ret;
406 		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
407 					  offset, offset + PTRACE_HBP_ADDR_SZ);
408 		if (ret)
409 			return ret;
410 		offset += PTRACE_HBP_ADDR_SZ;
411 
412 		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
413 		if (ret)
414 			return ret;
415 		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
416 					  offset, offset + PTRACE_HBP_CTRL_SZ);
417 		if (ret)
418 			return ret;
419 		offset += PTRACE_HBP_CTRL_SZ;
420 
421 		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
422 					       offset,
423 					       offset + PTRACE_HBP_PAD_SZ);
424 		if (ret)
425 			return ret;
426 		offset += PTRACE_HBP_PAD_SZ;
427 		idx++;
428 	}
429 
430 	return 0;
431 }
432 
433 static int hw_break_set(struct task_struct *target,
434 			const struct user_regset *regset,
435 			unsigned int pos, unsigned int count,
436 			const void *kbuf, const void __user *ubuf)
437 {
438 	unsigned int note_type = regset->core_note_type;
439 	int ret, idx = 0, offset, limit;
440 	u32 ctrl;
441 	u64 addr;
442 
443 	/* Resource info and pad */
444 	offset = offsetof(struct user_hwdebug_state, dbg_regs);
445 	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
446 	if (ret)
447 		return ret;
448 
449 	/* (address, ctrl) registers */
450 	limit = regset->n * regset->size;
451 	while (count && offset < limit) {
452 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
453 					 offset, offset + PTRACE_HBP_ADDR_SZ);
454 		if (ret)
455 			return ret;
456 		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
457 		if (ret)
458 			return ret;
459 		offset += PTRACE_HBP_ADDR_SZ;
460 
461 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
462 					 offset, offset + PTRACE_HBP_CTRL_SZ);
463 		if (ret)
464 			return ret;
465 		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
466 		if (ret)
467 			return ret;
468 		offset += PTRACE_HBP_CTRL_SZ;
469 
470 		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
471 						offset,
472 						offset + PTRACE_HBP_PAD_SZ);
473 		if (ret)
474 			return ret;
475 		offset += PTRACE_HBP_PAD_SZ;
476 		idx++;
477 	}
478 
479 	return 0;
480 }
481 #endif	/* CONFIG_HAVE_HW_BREAKPOINT */
482 
483 static int gpr_get(struct task_struct *target,
484 		   const struct user_regset *regset,
485 		   unsigned int pos, unsigned int count,
486 		   void *kbuf, void __user *ubuf)
487 {
488 	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
489 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
490 }
491 
492 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
493 		   unsigned int pos, unsigned int count,
494 		   const void *kbuf, const void __user *ubuf)
495 {
496 	int ret;
497 	struct user_pt_regs newregs;
498 
499 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
500 	if (ret)
501 		return ret;
502 
503 	if (!valid_user_regs(&newregs, target))
504 		return -EINVAL;
505 
506 	task_pt_regs(target)->user_regs = newregs;
507 	return 0;
508 }
509 
510 /*
511  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
512  */
513 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
514 		   unsigned int pos, unsigned int count,
515 		   void *kbuf, void __user *ubuf)
516 {
517 	struct user_fpsimd_state *uregs;
518 	uregs = &target->thread.fpsimd_state.user_fpsimd;
519 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
520 }
521 
522 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
523 		   unsigned int pos, unsigned int count,
524 		   const void *kbuf, const void __user *ubuf)
525 {
526 	int ret;
527 	struct user_fpsimd_state newstate;
528 
529 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
530 	if (ret)
531 		return ret;
532 
533 	target->thread.fpsimd_state.user_fpsimd = newstate;
534 	fpsimd_flush_task_state(target);
535 	return ret;
536 }
537 
538 static int tls_get(struct task_struct *target, const struct user_regset *regset,
539 		   unsigned int pos, unsigned int count,
540 		   void *kbuf, void __user *ubuf)
541 {
542 	unsigned long *tls = &target->thread.tp_value;
543 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
544 }
545 
546 static int tls_set(struct task_struct *target, const struct user_regset *regset,
547 		   unsigned int pos, unsigned int count,
548 		   const void *kbuf, const void __user *ubuf)
549 {
550 	int ret;
551 	unsigned long tls;
552 
553 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
554 	if (ret)
555 		return ret;
556 
557 	target->thread.tp_value = tls;
558 	return ret;
559 }
560 
561 static int system_call_get(struct task_struct *target,
562 			   const struct user_regset *regset,
563 			   unsigned int pos, unsigned int count,
564 			   void *kbuf, void __user *ubuf)
565 {
566 	int syscallno = task_pt_regs(target)->syscallno;
567 
568 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
569 				   &syscallno, 0, -1);
570 }
571 
572 static int system_call_set(struct task_struct *target,
573 			   const struct user_regset *regset,
574 			   unsigned int pos, unsigned int count,
575 			   const void *kbuf, const void __user *ubuf)
576 {
577 	int syscallno, ret;
578 
579 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
580 	if (ret)
581 		return ret;
582 
583 	task_pt_regs(target)->syscallno = syscallno;
584 	return ret;
585 }
586 
587 enum aarch64_regset {
588 	REGSET_GPR,
589 	REGSET_FPR,
590 	REGSET_TLS,
591 #ifdef CONFIG_HAVE_HW_BREAKPOINT
592 	REGSET_HW_BREAK,
593 	REGSET_HW_WATCH,
594 #endif
595 	REGSET_SYSTEM_CALL,
596 };
597 
598 static const struct user_regset aarch64_regsets[] = {
599 	[REGSET_GPR] = {
600 		.core_note_type = NT_PRSTATUS,
601 		.n = sizeof(struct user_pt_regs) / sizeof(u64),
602 		.size = sizeof(u64),
603 		.align = sizeof(u64),
604 		.get = gpr_get,
605 		.set = gpr_set
606 	},
607 	[REGSET_FPR] = {
608 		.core_note_type = NT_PRFPREG,
609 		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
610 		/*
611 		 * We pretend we have 32-bit registers because the fpsr and
612 		 * fpcr are 32 bits wide.
613 		 */
614 		.size = sizeof(u32),
615 		.align = sizeof(u32),
616 		.get = fpr_get,
617 		.set = fpr_set
618 	},
619 	[REGSET_TLS] = {
620 		.core_note_type = NT_ARM_TLS,
621 		.n = 1,
622 		.size = sizeof(void *),
623 		.align = sizeof(void *),
624 		.get = tls_get,
625 		.set = tls_set,
626 	},
627 #ifdef CONFIG_HAVE_HW_BREAKPOINT
628 	[REGSET_HW_BREAK] = {
629 		.core_note_type = NT_ARM_HW_BREAK,
630 		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
631 		.size = sizeof(u32),
632 		.align = sizeof(u32),
633 		.get = hw_break_get,
634 		.set = hw_break_set,
635 	},
636 	[REGSET_HW_WATCH] = {
637 		.core_note_type = NT_ARM_HW_WATCH,
638 		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
639 		.size = sizeof(u32),
640 		.align = sizeof(u32),
641 		.get = hw_break_get,
642 		.set = hw_break_set,
643 	},
644 #endif
645 	[REGSET_SYSTEM_CALL] = {
646 		.core_note_type = NT_ARM_SYSTEM_CALL,
647 		.n = 1,
648 		.size = sizeof(int),
649 		.align = sizeof(int),
650 		.get = system_call_get,
651 		.set = system_call_set,
652 	},
653 };
654 
655 static const struct user_regset_view user_aarch64_view = {
656 	.name = "aarch64", .e_machine = EM_AARCH64,
657 	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
658 };
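/*
 * Illustrative userspace access to the regsets above (not part of this
 * file; shown only as an example of the generic regset interface):
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *
 * NT_ARM_TLS, NT_ARM_HW_BREAK, NT_ARM_HW_WATCH and NT_ARM_SYSTEM_CALL
 * are requested the same way with the matching structure.
 */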
659 
660 #ifdef CONFIG_COMPAT
661 #include <linux/compat.h>
662 
663 enum compat_regset {
664 	REGSET_COMPAT_GPR,
665 	REGSET_COMPAT_VFP,
666 };
667 
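/*
 * The AArch32 GPR view handled below follows the arm32 elf_gregset_t
 * layout: indices 0-14 map onto regs[0]-regs[14], index 15 is the PC,
 * index 16 the CPSR (kept in pstate) and index 17 ORIG_r0 (kept in
 * orig_x0).
 */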
668 static int compat_gpr_get(struct task_struct *target,
669 			  const struct user_regset *regset,
670 			  unsigned int pos, unsigned int count,
671 			  void *kbuf, void __user *ubuf)
672 {
673 	int ret = 0;
674 	unsigned int i, start, num_regs;
675 
676 	/* Calculate the number of AArch32 registers contained in count */
677 	num_regs = count / regset->size;
678 
679 	/* Convert pos into a register number */
680 	start = pos / regset->size;
681 
682 	if (start + num_regs > regset->n)
683 		return -EIO;
684 
685 	for (i = 0; i < num_regs; ++i) {
686 		unsigned int idx = start + i;
687 		compat_ulong_t reg;
688 
689 		switch (idx) {
690 		case 15:
691 			reg = task_pt_regs(target)->pc;
692 			break;
693 		case 16:
694 			reg = task_pt_regs(target)->pstate;
695 			break;
696 		case 17:
697 			reg = task_pt_regs(target)->orig_x0;
698 			break;
699 		default:
700 			reg = task_pt_regs(target)->regs[idx];
701 		}
702 
703 		if (kbuf) {
704 			memcpy(kbuf, &reg, sizeof(reg));
705 			kbuf += sizeof(reg);
706 		} else {
707 			ret = copy_to_user(ubuf, &reg, sizeof(reg));
708 			if (ret) {
709 				ret = -EFAULT;
710 				break;
711 			}
712 
713 			ubuf += sizeof(reg);
714 		}
715 	}
716 
717 	return ret;
718 }
719 
720 static int compat_gpr_set(struct task_struct *target,
721 			  const struct user_regset *regset,
722 			  unsigned int pos, unsigned int count,
723 			  const void *kbuf, const void __user *ubuf)
724 {
725 	struct pt_regs newregs;
726 	int ret = 0;
727 	unsigned int i, start, num_regs;
728 
729 	/* Calculate the number of AArch32 registers contained in count */
730 	num_regs = count / regset->size;
731 
732 	/* Convert pos into an register number */
733 	start = pos / regset->size;
734 
735 	if (start + num_regs > regset->n)
736 		return -EIO;
737 
738 	newregs = *task_pt_regs(target);
739 
740 	for (i = 0; i < num_regs; ++i) {
741 		unsigned int idx = start + i;
742 		compat_ulong_t reg;
743 
744 		if (kbuf) {
745 			memcpy(&reg, kbuf, sizeof(reg));
746 			kbuf += sizeof(reg);
747 		} else {
748 			ret = copy_from_user(&reg, ubuf, sizeof(reg));
749 			if (ret) {
750 				ret = -EFAULT;
751 				break;
752 			}
753 
754 			ubuf += sizeof(reg);
755 		}
756 
757 		switch (idx) {
758 		case 15:
759 			newregs.pc = reg;
760 			break;
761 		case 16:
762 			newregs.pstate = reg;
763 			break;
764 		case 17:
765 			newregs.orig_x0 = reg;
766 			break;
767 		default:
768 			newregs.regs[idx] = reg;
769 		}
770 
771 	}
772 
773 	if (valid_user_regs(&newregs.user_regs, target))
774 		*task_pt_regs(target) = newregs;
775 	else
776 		ret = -EINVAL;
777 
778 	return ret;
779 }
780 
781 static int compat_vfp_get(struct task_struct *target,
782 			  const struct user_regset *regset,
783 			  unsigned int pos, unsigned int count,
784 			  void *kbuf, void __user *ubuf)
785 {
786 	struct user_fpsimd_state *uregs;
787 	compat_ulong_t fpscr;
788 	int ret;
789 
790 	uregs = &target->thread.fpsimd_state.user_fpsimd;
791 
792 	/*
793 	 * The VFP registers are packed into the fpsimd_state, so they all sit
794 	 * nicely together for us. We just need to create the fpscr separately.
795 	 */
796 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
797 				  VFP_STATE_SIZE - sizeof(compat_ulong_t));
798 
799 	if (count && !ret) {
800 		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
801 			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
802 		ret = put_user(fpscr, (compat_ulong_t __user *)ubuf);
803 	}
804 
805 	return ret;
806 }
807 
808 static int compat_vfp_set(struct task_struct *target,
809 			  const struct user_regset *regset,
810 			  unsigned int pos, unsigned int count,
811 			  const void *kbuf, const void __user *ubuf)
812 {
813 	struct user_fpsimd_state *uregs;
814 	compat_ulong_t fpscr;
815 	int ret;
816 
817 	if (pos + count > VFP_STATE_SIZE)
818 		return -EIO;
819 
820 	uregs = &target->thread.fpsimd_state.user_fpsimd;
821 
822 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
823 				 VFP_STATE_SIZE - sizeof(compat_ulong_t));
824 
825 	if (count && !ret) {
826 		ret = get_user(fpscr, (compat_ulong_t __user *)ubuf);
827 		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
828 		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
829 	}
830 
831 	fpsimd_flush_task_state(target);
832 	return ret;
833 }
834 
835 static int compat_tls_get(struct task_struct *target,
836 			  const struct user_regset *regset, unsigned int pos,
837 			  unsigned int count, void *kbuf, void __user *ubuf)
838 {
839 	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
840 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
841 }
842 
843 static int compat_tls_set(struct task_struct *target,
844 			  const struct user_regset *regset, unsigned int pos,
845 			  unsigned int count, const void *kbuf,
846 			  const void __user *ubuf)
847 {
848 	int ret;
849 	compat_ulong_t tls;
850 
851 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
852 	if (ret)
853 		return ret;
854 
855 	target->thread.tp_value = tls;
856 	return ret;
857 }
858 
859 static const struct user_regset aarch32_regsets[] = {
860 	[REGSET_COMPAT_GPR] = {
861 		.core_note_type = NT_PRSTATUS,
862 		.n = COMPAT_ELF_NGREG,
863 		.size = sizeof(compat_elf_greg_t),
864 		.align = sizeof(compat_elf_greg_t),
865 		.get = compat_gpr_get,
866 		.set = compat_gpr_set
867 	},
868 	[REGSET_COMPAT_VFP] = {
869 		.core_note_type = NT_ARM_VFP,
870 		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
871 		.size = sizeof(compat_ulong_t),
872 		.align = sizeof(compat_ulong_t),
873 		.get = compat_vfp_get,
874 		.set = compat_vfp_set
875 	},
876 };
877 
878 static const struct user_regset_view user_aarch32_view = {
879 	.name = "aarch32", .e_machine = EM_ARM,
880 	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
881 };
882 
883 static const struct user_regset aarch32_ptrace_regsets[] = {
884 	[REGSET_GPR] = {
885 		.core_note_type = NT_PRSTATUS,
886 		.n = COMPAT_ELF_NGREG,
887 		.size = sizeof(compat_elf_greg_t),
888 		.align = sizeof(compat_elf_greg_t),
889 		.get = compat_gpr_get,
890 		.set = compat_gpr_set
891 	},
892 	[REGSET_FPR] = {
893 		.core_note_type = NT_ARM_VFP,
894 		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
895 		.size = sizeof(compat_ulong_t),
896 		.align = sizeof(compat_ulong_t),
897 		.get = compat_vfp_get,
898 		.set = compat_vfp_set
899 	},
900 	[REGSET_TLS] = {
901 		.core_note_type = NT_ARM_TLS,
902 		.n = 1,
903 		.size = sizeof(compat_ulong_t),
904 		.align = sizeof(compat_ulong_t),
905 		.get = compat_tls_get,
906 		.set = compat_tls_set,
907 	},
908 #ifdef CONFIG_HAVE_HW_BREAKPOINT
909 	[REGSET_HW_BREAK] = {
910 		.core_note_type = NT_ARM_HW_BREAK,
911 		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
912 		.size = sizeof(u32),
913 		.align = sizeof(u32),
914 		.get = hw_break_get,
915 		.set = hw_break_set,
916 	},
917 	[REGSET_HW_WATCH] = {
918 		.core_note_type = NT_ARM_HW_WATCH,
919 		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
920 		.size = sizeof(u32),
921 		.align = sizeof(u32),
922 		.get = hw_break_get,
923 		.set = hw_break_set,
924 	},
925 #endif
926 	[REGSET_SYSTEM_CALL] = {
927 		.core_note_type = NT_ARM_SYSTEM_CALL,
928 		.n = 1,
929 		.size = sizeof(int),
930 		.align = sizeof(int),
931 		.get = system_call_get,
932 		.set = system_call_set,
933 	},
934 };
935 
936 static const struct user_regset_view user_aarch32_ptrace_view = {
937 	.name = "aarch32", .e_machine = EM_ARM,
938 	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
939 };
940 
941 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
942 				   compat_ulong_t __user *ret)
943 {
944 	compat_ulong_t tmp;
945 
946 	if (off & 3)
947 		return -EIO;
948 
949 	if (off == COMPAT_PT_TEXT_ADDR)
950 		tmp = tsk->mm->start_code;
951 	else if (off == COMPAT_PT_DATA_ADDR)
952 		tmp = tsk->mm->start_data;
953 	else if (off == COMPAT_PT_TEXT_END_ADDR)
954 		tmp = tsk->mm->end_code;
955 	else if (off < sizeof(compat_elf_gregset_t))
956 		return copy_regset_to_user(tsk, &user_aarch32_view,
957 					   REGSET_COMPAT_GPR, off,
958 					   sizeof(compat_ulong_t), ret);
959 	else if (off >= COMPAT_USER_SZ)
960 		return -EIO;
961 	else
962 		tmp = 0;
963 
964 	return put_user(tmp, ret);
965 }
966 
967 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
968 				    compat_ulong_t val)
969 {
970 	int ret;
971 	mm_segment_t old_fs = get_fs();
972 
973 	if (off & 3 || off >= COMPAT_USER_SZ)
974 		return -EIO;
975 
976 	if (off >= sizeof(compat_elf_gregset_t))
977 		return 0;
978 
979 	set_fs(KERNEL_DS);
980 	ret = copy_regset_from_user(tsk, &user_aarch32_view,
981 				    REGSET_COMPAT_GPR, off,
982 				    sizeof(compat_ulong_t),
983 				    &val);
984 	set_fs(old_fs);
985 
986 	return ret;
987 }
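/*
 * The set_fs(KERNEL_DS) window above exists because
 * copy_regset_from_user() expects a __user pointer: raising the address
 * limit lets it read the value from the on-stack variable &val.
 */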
988 
989 #ifdef CONFIG_HAVE_HW_BREAKPOINT
990 
991 /*
992  * Convert a virtual register number into an index for a thread_struct
993  * breakpoint array. Breakpoints are identified using positive numbers
994  * whilst watchpoints are negative. The registers are laid out as pairs
995  * of (address, control), each pair mapping to a unique hw_breakpoint struct.
996  * Register 0 is reserved for describing resource information.
997  */
998 static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
999 {
1000 	return (abs(num) - 1) >> 1;
1001 }
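/*
 * Worked example of the mapping above: num == 1 is breakpoint 0's
 * address and num == 2 its control register, while num == -1 and
 * num == -2 are watchpoint 0's address and control register, so
 * idx == (abs(num) - 1) >> 1 and the callers test (num & 1) to choose
 * between address and control.
 */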
1002 
1003 static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
1004 {
1005 	u8 num_brps, num_wrps, debug_arch, wp_len;
1006 	u32 reg = 0;
1007 
1008 	num_brps	= hw_breakpoint_slots(TYPE_INST);
1009 	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
1010 
1011 	debug_arch	= debug_monitors_arch();
1012 	wp_len		= 8;
1013 	reg		|= debug_arch;
1014 	reg		<<= 8;
1015 	reg		|= wp_len;
1016 	reg		<<= 8;
1017 	reg		|= num_wrps;
1018 	reg		<<= 8;
1019 	reg		|= num_brps;
1020 
1021 	*kdata = reg;
1022 	return 0;
1023 }
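/*
 * From most to least significant byte, the word built above holds the
 * debug architecture version, the watchpoint length (8 bytes), the
 * number of watchpoint slots and the number of breakpoint slots.
 */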
1024 
1025 static int compat_ptrace_hbp_get(unsigned int note_type,
1026 				 struct task_struct *tsk,
1027 				 compat_long_t num,
1028 				 u32 *kdata)
1029 {
1030 	u64 addr = 0;
1031 	u32 ctrl = 0;
1032 
1033 	int err, idx = compat_ptrace_hbp_num_to_idx(num);
1034 
1035 	if (num & 1) {
1036 		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
1037 		*kdata = (u32)addr;
1038 	} else {
1039 		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
1040 		*kdata = ctrl;
1041 	}
1042 
1043 	return err;
1044 }
1045 
1046 static int compat_ptrace_hbp_set(unsigned int note_type,
1047 				 struct task_struct *tsk,
1048 				 compat_long_t num,
1049 				 u32 *kdata)
1050 {
1051 	u64 addr;
1052 	u32 ctrl;
1053 
1054 	int err, idx = compat_ptrace_hbp_num_to_idx(num);
1055 
1056 	if (num & 1) {
1057 		addr = *kdata;
1058 		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
1059 	} else {
1060 		ctrl = *kdata;
1061 		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
1062 	}
1063 
1064 	return err;
1065 }
1066 
1067 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1068 				    compat_ulong_t __user *data)
1069 {
1070 	int ret;
1071 	u32 kdata;
1072 	mm_segment_t old_fs = get_fs();
1073 
1074 	set_fs(KERNEL_DS);
1075 	/* Watchpoint */
1076 	if (num < 0) {
1077 		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
1078 	/* Resource info */
1079 	} else if (num == 0) {
1080 		ret = compat_ptrace_hbp_get_resource_info(&kdata);
1081 	/* Breakpoint */
1082 	} else {
1083 		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
1084 	}
1085 	set_fs(old_fs);
1086 
1087 	if (!ret)
1088 		ret = put_user(kdata, data);
1089 
1090 	return ret;
1091 }
1092 
1093 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1094 				    compat_ulong_t __user *data)
1095 {
1096 	int ret;
1097 	u32 kdata = 0;
1098 	mm_segment_t old_fs = get_fs();
1099 
1100 	if (num == 0)
1101 		return 0;
1102 
1103 	ret = get_user(kdata, data);
1104 	if (ret)
1105 		return ret;
1106 
1107 	set_fs(KERNEL_DS);
1108 	if (num < 0)
1109 		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
1110 	else
1111 		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
1112 	set_fs(old_fs);
1113 
1114 	return ret;
1115 }
1116 #endif	/* CONFIG_HAVE_HW_BREAKPOINT */
1117 
1118 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1119 			compat_ulong_t caddr, compat_ulong_t cdata)
1120 {
1121 	unsigned long addr = caddr;
1122 	unsigned long data = cdata;
1123 	void __user *datap = compat_ptr(data);
1124 	int ret;
1125 
1126 	switch (request) {
1127 		case PTRACE_PEEKUSR:
1128 			ret = compat_ptrace_read_user(child, addr, datap);
1129 			break;
1130 
1131 		case PTRACE_POKEUSR:
1132 			ret = compat_ptrace_write_user(child, addr, data);
1133 			break;
1134 
1135 		case COMPAT_PTRACE_GETREGS:
1136 			ret = copy_regset_to_user(child,
1137 						  &user_aarch32_view,
1138 						  REGSET_COMPAT_GPR,
1139 						  0, sizeof(compat_elf_gregset_t),
1140 						  datap);
1141 			break;
1142 
1143 		case COMPAT_PTRACE_SETREGS:
1144 			ret = copy_regset_from_user(child,
1145 						    &user_aarch32_view,
1146 						    REGSET_COMPAT_GPR,
1147 						    0, sizeof(compat_elf_gregset_t),
1148 						    datap);
1149 			break;
1150 
1151 		case COMPAT_PTRACE_GET_THREAD_AREA:
1152 			ret = put_user((compat_ulong_t)child->thread.tp_value,
1153 				       (compat_ulong_t __user *)datap);
1154 			break;
1155 
1156 		case COMPAT_PTRACE_SET_SYSCALL:
1157 			task_pt_regs(child)->syscallno = data;
1158 			ret = 0;
1159 			break;
1160 
1161 		case COMPAT_PTRACE_GETVFPREGS:
1162 			ret = copy_regset_to_user(child,
1163 						  &user_aarch32_view,
1164 						  REGSET_COMPAT_VFP,
1165 						  0, VFP_STATE_SIZE,
1166 						  datap);
1167 			break;
1168 
1169 		case COMPAT_PTRACE_SETVFPREGS:
1170 			ret = copy_regset_from_user(child,
1171 						    &user_aarch32_view,
1172 						    REGSET_COMPAT_VFP,
1173 						    0, VFP_STATE_SIZE,
1174 						    datap);
1175 			break;
1176 
1177 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1178 		case COMPAT_PTRACE_GETHBPREGS:
1179 			ret = compat_ptrace_gethbpregs(child, addr, datap);
1180 			break;
1181 
1182 		case COMPAT_PTRACE_SETHBPREGS:
1183 			ret = compat_ptrace_sethbpregs(child, addr, datap);
1184 			break;
1185 #endif
1186 
1187 		default:
1188 			ret = compat_ptrace_request(child, request, addr,
1189 						    data);
1190 			break;
1191 	}
1192 
1193 	return ret;
1194 }
1195 #endif /* CONFIG_COMPAT */
1196 
1197 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1198 {
1199 #ifdef CONFIG_COMPAT
1200 	/*
1201 	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
1202 	 * user_aarch32_view compatible with arm32. Native ptrace requests on
1203 	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
1204 	 * access to the TLS register.
1205 	 */
1206 	if (is_compat_task())
1207 		return &user_aarch32_view;
1208 	else if (is_compat_thread(task_thread_info(task)))
1209 		return &user_aarch32_ptrace_view;
1210 #endif
1211 	return &user_aarch64_view;
1212 }
1213 
1214 long arch_ptrace(struct task_struct *child, long request,
1215 		 unsigned long addr, unsigned long data)
1216 {
1217 	return ptrace_request(child, request, addr, data);
1218 }
1219 
1220 enum ptrace_syscall_dir {
1221 	PTRACE_SYSCALL_ENTER = 0,
1222 	PTRACE_SYSCALL_EXIT,
1223 };
1224 
1225 static void tracehook_report_syscall(struct pt_regs *regs,
1226 				     enum ptrace_syscall_dir dir)
1227 {
1228 	int regno;
1229 	unsigned long saved_reg;
1230 
1231 	/*
1232 	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
1233 	 * used to denote syscall entry/exit:
1234 	 */
1235 	regno = (is_compat_task() ? 12 : 7);
1236 	saved_reg = regs->regs[regno];
1237 	regs->regs[regno] = dir;
1238 
1239 	if (dir == PTRACE_SYSCALL_EXIT)
1240 		tracehook_report_syscall_exit(regs, 0);
1241 	else if (tracehook_report_syscall_entry(regs))
1242 		regs->syscallno = ~0UL;
1243 
1244 	regs->regs[regno] = saved_reg;
1245 }
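/*
 * A tracer inspecting registers while the task is stopped in one of the
 * reports above therefore sees the direction value in x7 (or r12 for a
 * compat task); the saved value is written back before the task resumes.
 */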
1246 
1247 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
1248 {
1249 	/* Do the secure computing check first; failures should be fast. */
1250 	if (secure_computing() == -1)
1251 		return -1;
1252 
1253 	if (test_thread_flag(TIF_SYSCALL_TRACE))
1254 		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
1255 
1256 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1257 		trace_sys_enter(regs, regs->syscallno);
1258 
1259 	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
1260 			    regs->regs[2], regs->regs[3]);
1261 
1262 	return regs->syscallno;
1263 }
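/*
 * The syscall entry assembly (not shown in this file) is expected to
 * compare the value returned above against -1 and skip the system call
 * on a match, which is how the secure_computing() failure path and a
 * tracer-requested skip take effect.
 */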
1264 
1265 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
1266 {
1267 	audit_syscall_exit(regs);
1268 
1269 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1270 		trace_sys_exit(regs, regs_return_value(regs));
1271 
1272 	if (test_thread_flag(TIF_SYSCALL_TRACE))
1273 		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
1274 }
1275 
1276 /*
1277  * Bits which are always architecturally RES0 per ARM DDI 0487A.h
1278  * Userspace cannot use these until they have an architectural meaning.
1279  * We also reserve IL for the kernel; SS is handled dynamically.
1280  */
1281 #define SPSR_EL1_AARCH64_RES0_BITS \
1282 	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
1283 	 GENMASK_ULL(5, 5))
1284 #define SPSR_EL1_AARCH32_RES0_BITS \
1285 	(GENMASK_ULL(63, 32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20, 20))
1286 
1287 static int valid_compat_regs(struct user_pt_regs *regs)
1288 {
1289 	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
1290 
1291 	if (!system_supports_mixed_endian_el0()) {
1292 		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1293 			regs->pstate |= COMPAT_PSR_E_BIT;
1294 		else
1295 			regs->pstate &= ~COMPAT_PSR_E_BIT;
1296 	}
1297 
1298 	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
1299 	    (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
1300 	    (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
1301 	    (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
1302 		return 1;
1303 	}
1304 
1305 	/*
1306 	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
1307 	 * arch/arm.
1308 	 */
1309 	regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
1310 			COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
1311 			COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
1312 			COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
1313 			COMPAT_PSR_T_BIT;
1314 	regs->pstate |= PSR_MODE32_BIT;
1315 
1316 	return 0;
1317 }
1318 
1319 static int valid_native_regs(struct user_pt_regs *regs)
1320 {
1321 	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
1322 
1323 	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
1324 	    (regs->pstate & PSR_D_BIT) == 0 &&
1325 	    (regs->pstate & PSR_A_BIT) == 0 &&
1326 	    (regs->pstate & PSR_I_BIT) == 0 &&
1327 	    (regs->pstate & PSR_F_BIT) == 0) {
1328 		return 1;
1329 	}
1330 
1331 	/* Force PSR to a valid 64-bit EL0t */
1332 	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
1333 
1334 	return 0;
1335 }
1336 
1337 /*
1338  * Are the current registers suitable for user mode? (used to maintain
1339  * security in signal handlers)
1340  */
1341 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
1342 {
1343 	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
1344 		regs->pstate &= ~DBG_SPSR_SS;
1345 
1346 	if (is_compat_thread(task_thread_info(task)))
1347 		return valid_compat_regs(regs);
1348 	else
1349 		return valid_native_regs(regs);
1350 }
1351