/*
 *  linux/arch/arm/kernel/ptrace.c
 *
 *  By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>

#define REG_PC	15
#define REG_PSR	16
/*
 * Does not yet catch signals sent when the child dies
 * (in exit.c or in signal.c).
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM	0xef9f0001
#define BREAKINST_THUMB	0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction.  The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 *  Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM	0xe7f001f0
#define BREAKINST_THUMB	0xde01
#endif
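
/*
 * Illustrative sketch (tracer side, not kernel code): a debugger typically
 * plants one of the BREAKINST_* opcodes in the tracee's text with
 * PTRACE_POKETEXT and restores the original word when the breakpoint is
 * removed.  Assuming <sys/ptrace.h> and a stopped tracee "pid" ("addr" and
 * "orig" are hypothetical names):
 *
 *	long orig = ptrace(PTRACE_PEEKTEXT, pid, (void *)addr, NULL);
 *	ptrace(PTRACE_POKETEXT, pid, (void *)addr, (void *)BREAKINST_ARM);
 *	// ... resume, wait for the SIGTRAP raised via ptrace_break() ...
 *	ptrace(PTRACE_POKETEXT, pid, (void *)addr, (void *)orig);
 */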

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
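
/*
 * Illustrative sketch: the two query helpers here are intended for code that
 * refers to registers by name (e.g. probe-style instrumentation).  A minimal
 * in-kernel usage example:
 *
 *	int off = regs_query_register_offset("lr");	// offset of ARM_lr
 *	if (off >= 0)
 *		pr_info("lr lives at pt_regs offset %d (%s)\n",
 *			off, regs_query_register_name(off));
 */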

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:      pt_regs which contains kernel stack pointer.
 * @addr:      address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, it returns false.
 */
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1))  ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
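
/*
 * Illustrative sketch: these stack accessors back the generic
 * regs-and-stack access API used by probe-based tracing.  Reading the
 * first stack slot at a probe site might look like this (minimal,
 * in-kernel example):
 *
 *	unsigned long arg;
 *
 *	arg = regs_get_kernel_stack_nth(regs, 0);	// 0 if off-stack
 */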

/*
 * This routine gets a word off of the process's privileged stack.
 * The offset is how far from the base addr as stored in the THREAD.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}

/*
 * This routine puts a word on the process's privileged stack.
 * The offset is how far from the base addr as stored in the THREAD.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	siginfo_t info;

	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
	info.si_addr  = (void __user *)instruction_pointer(regs);

	force_sig_info(SIGTRAP, &info, tsk);
}

static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;
}

static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xf7f0a000,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
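
/*
 * Note (descriptive): arm_break_hook matches BREAKINST_ARM with the condition
 * bits masked off, thumb_break_hook matches BREAKINST_THUMB, and
 * thumb2_break_hook matches the 32-bit Thumb-2 encoding 0xf7f0a000 used as a
 * breakpoint there; the cpsr T bit selects ARM vs Thumb state.  Hitting any
 * of them in user mode lands in break_trap(), which raises SIGTRAP via
 * ptrace_break().
 */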

/*
 * Read the word at offset "off" in the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}
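
/*
 * Illustrative sketch (tracer side, not kernel code): PTRACE_PEEKUSR and
 * PTRACE_POKEUSR take a byte offset into "struct user", which for the
 * registers is simply the uregs index times 4.  Assuming <sys/ptrace.h>
 * and a stopped tracee "pid" ("new_r0" is a hypothetical value):
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSR, pid, (void *)(REG_PC * 4), NULL);
 *	ptrace(PTRACE_POKEUSR, pid, (void *)0, (void *)new_r0);	// r0 is offset 0
 */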

#ifdef CONFIG_IWMMXT

/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}

#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int ptrace_hbp_num_to_idx(long num)
{
	if (num < 0)
		num = (ARM_MAX_BRP << 1) - num;
	return (num - 1) >> 1;
}

/*
 * Returns the virtual register number for the address of the
 * breakpoint at index idx.
 */
static long ptrace_hbp_idx_to_num(int idx)
{
	long mid = ARM_MAX_BRP << 1;
	long num = (idx << 1) + 1;
	return num > mid ? mid - num : num;
}
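
/*
 * Worked example of the mapping above: registers 1 and 2 (address/control of
 * breakpoint 0) both map to index 0, while registers -1 and -2
 * (address/control of watchpoint 0) both map to index ARM_MAX_BRP.  Going the
 * other way, ptrace_hbp_idx_to_num(0) == 1 and
 * ptrace_hbp_idx_to_num(ARM_MAX_BRP) == -1.
 */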

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				     struct perf_sample_data *data,
				     struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;
	siginfo_t info;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	info.si_signo	= SIGTRAP;
	info.si_errno	= (int)num;
	info.si_code	= TRAP_HWBKPT;
	info.si_addr	= (void __user *)(bkpt->trigger);

	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
		if (t->debug.hbp[i]) {
			unregister_hw_breakpoint(t->debug.hbp[i]);
			t->debug.hbp[i] = NULL;
		}
	}
}

static u32 ptrace_get_hbp_resource_info(void)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
	debug_arch	= arch_get_debug_arch();
	wp_len		= arch_get_max_wp_len();

	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	return reg;
}
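
/*
 * Note (descriptive): the packed word built above, and exposed as virtual
 * register 0 via PTRACE_GETHBPREGS, is laid out as:
 *
 *	[31:24] debug architecture version
 *	[23:16] maximum watchpoint length
 *	[15: 8] number of watchpoint slots
 *	[ 7: 0] number of breakpoint slots
 */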

static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long  __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}

static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr	= user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len	= gen_len;
		attr.bp_type	= gen_type;
		attr.disabled	= !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
#endif
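
/*
 * Illustrative sketch (tracer side, not kernel code): a tracer discovers the
 * debug resources by reading virtual register 0, then programs the
 * (address, control) register pairs.  Assuming <sys/ptrace.h> and a stopped
 * tracee "pid":
 *
 *	unsigned long info;
 *
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info);
 *	// info & 0xff is the number of breakpoint slots; see
 *	// ptrace_get_hbp_resource_info() above for the full layout.
 */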

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs,
				   0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}

static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &task_thread_info(target)->fpstate,
				   0, sizeof(struct user_fp));
}

static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
		&thread->fpstate,
		0, sizeof(struct user_fp));
}

#ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct.  The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 *
 * The bounds-checking logic built into user_regset_copyout and friends
 * means that we can make a simple sequence of calls to map the relevant data
 * to/from the specified slice of the user regset structure.
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &new_vfp.fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				user_fpregs_offset + sizeof(new_vfp.fpregs),
				user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	vfp_sync_hwstate(thread);
	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
#endif /* CONFIG_VFP */
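
/*
 * Illustrative sketch (tracer side, not kernel code): these regsets are
 * reachable through the ARM-specific requests handled in arch_ptrace()
 * below.  Reading the VFP state, assuming <sys/ptrace.h>, the ARM
 * struct user_vfp layout (fpregs[32] plus fpscr) and a stopped tracee
 * "pid":
 *
 *	struct user_vfp uvfp;
 *
 *	if (ptrace(PTRACE_GETVFPREGS, pid, NULL, &uvfp) == 0)
 *		printf("fpscr = %#lx\n", uvfp.fpscr);
 */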

enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}
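
/*
 * Illustrative sketch (tracer side, not kernel code): the regset view above
 * also serves the generic PTRACE_GETREGSET/SETREGSET interface and the ELF
 * core-dump notes.  Fetching the GP registers via the NT_PRSTATUS note,
 * assuming <sys/ptrace.h>, <sys/uio.h>, <elf.h> and a stopped tracee "pid":
 *
 *	unsigned long gp[18];	// r0-r15, cpsr, ORIG_r0
 *	struct iovec iov = { .iov_base = gp, .iov_len = sizeof(gp) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */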

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = ptrace_write_user(child, addr, data);
			break;

		case PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_GPR,
						  0, sizeof(struct pt_regs),
						  datap);
			break;

		case PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_GPR,
						    0, sizeof(struct pt_regs),
						    datap);
			break;

		case PTRACE_GETFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_FPR,
						  0, sizeof(union fp_state),
						  datap);
			break;

		case PTRACE_SETFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_FPR,
						    0, sizeof(union fp_state),
						    datap);
			break;

#ifdef CONFIG_IWMMXT
		case PTRACE_GETWMMXREGS:
			ret = ptrace_getwmmxregs(child, datap);
			break;

		case PTRACE_SETWMMXREGS:
			ret = ptrace_setwmmxregs(child, datap);
			break;
#endif

		case PTRACE_GET_THREAD_AREA:
			ret = put_user(task_thread_info(child)->tp_value,
				       datap);
			break;

		case PTRACE_SET_SYSCALL:
			task_thread_info(child)->syscall = data;
			ret = 0;
			break;

#ifdef CONFIG_CRUNCH
		case PTRACE_GETCRUNCHREGS:
			ret = ptrace_getcrunchregs(child, datap);
			break;

		case PTRACE_SETCRUNCHREGS:
			ret = ptrace_setcrunchregs(child, datap);
			break;
#endif

#ifdef CONFIG_VFP
		case PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_VFP,
						  0, ARM_VFPREGS_SIZE,
						  datap);
			break;

		case PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_VFP,
						    0, ARM_VFPREGS_SIZE,
						    datap);
			break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case PTRACE_GETHBPREGS:
			if (ptrace_get_breakpoints(child) < 0)
				return -ESRCH;

			ret = ptrace_gethbpregs(child, addr,
						(unsigned long __user *)data);
			ptrace_put_breakpoints(child);
			break;
		case PTRACE_SETHBPREGS:
			if (ptrace_get_breakpoints(child) < 0)
				return -ESRCH;

			ret = ptrace_sethbpregs(child, addr,
						(unsigned long __user *)data);
			ptrace_put_breakpoints(child);
			break;
#endif

		default:
			ret = ptrace_request(child, request, addr, data);
			break;
	}

	return ret;
}
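
/*
 * Illustrative sketch (tracer side, not kernel code): PTRACE_SET_SYSCALL is
 * ARM-specific and lets a tracer rewrite the syscall number at a syscall
 * stop, as handled above.  With "pid" stopped at syscall entry ("new_scno"
 * is a hypothetical value):
 *
 *	ptrace(PTRACE_SET_SYSCALL, pid, NULL, (void *)(long)new_scno);
 */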

asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
	unsigned long ip;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return scno;
	if (!(current->ptrace & PT_PTRACED))
		return scno;

	/*
	 * Save IP.  IP is used to denote syscall entry/exit:
	 *  IP = 0 -> entry, = 1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = why;

	current_thread_info()->syscall = scno;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	regs->ARM_ip = ip;

	return current_thread_info()->syscall;
}
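
/*
 * Illustrative sketch (tracer side, not kernel code): with the 0x80 trick
 * above, a tracer that set PTRACE_O_TRACESYSGOOD can tell syscall stops
 * from ordinary SIGTRAPs.  A minimal wait-loop fragment, assuming
 * <sys/wait.h>:
 *
 *	int status;
 *
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
 *		;	// syscall entry or exit stop
 */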
939