1 // SPDX-License-Identifier: GPL-2.0-only
2 /* By Ross Biro 1/23/92 */
3 /*
4  * Pentium III FXSR, SSE support
5  *	Gareth Hughes <gareth@valinux.com>, May 2000
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/sched.h>
10 #include <linux/sched/task_stack.h>
11 #include <linux/mm.h>
12 #include <linux/smp.h>
13 #include <linux/errno.h>
14 #include <linux/slab.h>
15 #include <linux/ptrace.h>
16 #include <linux/tracehook.h>
17 #include <linux/user.h>
18 #include <linux/elf.h>
19 #include <linux/security.h>
20 #include <linux/audit.h>
21 #include <linux/seccomp.h>
22 #include <linux/signal.h>
23 #include <linux/perf_event.h>
24 #include <linux/hw_breakpoint.h>
25 #include <linux/rcupdate.h>
26 #include <linux/export.h>
27 #include <linux/context_tracking.h>
28 
29 #include <linux/uaccess.h>
30 #include <asm/pgtable.h>
31 #include <asm/processor.h>
32 #include <asm/fpu/internal.h>
33 #include <asm/fpu/signal.h>
34 #include <asm/fpu/regset.h>
35 #include <asm/debugreg.h>
36 #include <asm/ldt.h>
37 #include <asm/desc.h>
38 #include <asm/prctl.h>
39 #include <asm/proto.h>
40 #include <asm/hw_breakpoint.h>
41 #include <asm/traps.h>
42 #include <asm/syscall.h>
43 #include <asm/fsgsbase.h>
44 
45 #include "tls.h"
46 
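/*
 * Indexes into the user_regset arrays defined near the end of this file.
 * There is no separate FXSR regset on 64-bit, so REGSET_IOPERM64 shares
 * the REGSET_XFP slot.
 */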
47 enum x86_regset {
48 	REGSET_GENERAL,
49 	REGSET_FP,
50 	REGSET_XFP,
51 	REGSET_IOPERM64 = REGSET_XFP,
52 	REGSET_XSTATE,
53 	REGSET_TLS,
54 	REGSET_IOPERM32,
55 };
56 
57 struct pt_regs_offset {
58 	const char *name;
59 	int offset;
60 };
61 
62 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
63 #define REG_OFFSET_END {.name = NULL, .offset = 0}
64 
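/*
 * Name -> byte offset mapping for struct pt_regs, used by
 * regs_query_register_offset() and regs_query_register_name() below.
 */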
65 static const struct pt_regs_offset regoffset_table[] = {
66 #ifdef CONFIG_X86_64
67 	REG_OFFSET_NAME(r15),
68 	REG_OFFSET_NAME(r14),
69 	REG_OFFSET_NAME(r13),
70 	REG_OFFSET_NAME(r12),
71 	REG_OFFSET_NAME(r11),
72 	REG_OFFSET_NAME(r10),
73 	REG_OFFSET_NAME(r9),
74 	REG_OFFSET_NAME(r8),
75 #endif
76 	REG_OFFSET_NAME(bx),
77 	REG_OFFSET_NAME(cx),
78 	REG_OFFSET_NAME(dx),
79 	REG_OFFSET_NAME(si),
80 	REG_OFFSET_NAME(di),
81 	REG_OFFSET_NAME(bp),
82 	REG_OFFSET_NAME(ax),
83 #ifdef CONFIG_X86_32
84 	REG_OFFSET_NAME(ds),
85 	REG_OFFSET_NAME(es),
86 	REG_OFFSET_NAME(fs),
87 	REG_OFFSET_NAME(gs),
88 #endif
89 	REG_OFFSET_NAME(orig_ax),
90 	REG_OFFSET_NAME(ip),
91 	REG_OFFSET_NAME(cs),
92 	REG_OFFSET_NAME(flags),
93 	REG_OFFSET_NAME(sp),
94 	REG_OFFSET_NAME(ss),
95 	REG_OFFSET_END,
96 };
97 
98 /**
99  * regs_query_register_offset() - query register offset from its name
100  * @name:	the name of a register
101  *
102  * regs_query_register_offset() returns the offset of a register in struct
103  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
104  */
105 int regs_query_register_offset(const char *name)
106 {
107 	const struct pt_regs_offset *roff;
108 	for (roff = regoffset_table; roff->name != NULL; roff++)
109 		if (!strcmp(roff->name, name))
110 			return roff->offset;
111 	return -EINVAL;
112 }
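/*
 * Illustrative sketch only (not code from this file): callers such as the
 * kprobe/uprobe event code typically pair this with regs_get_register()
 * from <asm/ptrace.h>, e.g.
 *
 *	int off = regs_query_register_offset("ip");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */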
113 
114 /**
115  * regs_query_register_name() - query register name from its offset
116  * @offset:	the offset of a register in struct pt_regs.
117  *
118  * regs_query_register_name() returns the name of a register from its
119  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
120  */
121 const char *regs_query_register_name(unsigned int offset)
122 {
123 	const struct pt_regs_offset *roff;
124 	for (roff = regoffset_table; roff->name != NULL; roff++)
125 		if (roff->offset == offset)
126 			return roff->name;
127 	return NULL;
128 }
129 
130 /*
131  * Does not yet catch signals sent when the child dies
132  * (in exit.c or in signal.c).
133  */
134 
135 /*
136  * Determines which flags the user has access to [1 = access, 0 = no access].
137  */
138 #define FLAG_MASK_32		((unsigned long)			\
139 				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
140 				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
141 				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
142 				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
143 				  X86_EFLAGS_RF | X86_EFLAGS_AC))
144 
145 /*
146  * Determines whether a value may be installed in a segment register.
147  */
148 static inline bool invalid_selector(u16 value)
149 {
150 	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
151 }
152 
153 #ifdef CONFIG_X86_32
154 
155 #define FLAG_MASK		FLAG_MASK_32
156 
157 /*
158  * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
159  * when it traps.  The previous stack will be directly underneath the saved
160  * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
161  *
162  * Now, if the stack is empty, '&regs->sp' is out of range. In this
163  * case we try to take the previous stack. To always return a non-null
164  * stack pointer we fall back to regs as stack if no previous stack
165  * exists.
166  *
167  * This is valid only for kernel mode traps.
168  */
169 unsigned long kernel_stack_pointer(struct pt_regs *regs)
170 {
171 	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
172 	unsigned long sp = (unsigned long)&regs->sp;
173 	u32 *prev_esp;
174 
175 	if (context == (sp & ~(THREAD_SIZE - 1)))
176 		return sp;
177 
178 	prev_esp = (u32 *)(context);
179 	if (*prev_esp)
180 		return (unsigned long)*prev_esp;
181 
182 	return (unsigned long)regs;
183 }
184 EXPORT_SYMBOL_GPL(kernel_stack_pointer);
185 
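/*
 * Map a byte offset into (the 32-bit) struct user_regs_struct, whose
 * fields are all 4 bytes wide, onto the matching slot in pt_regs.
 */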
186 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
187 {
188 	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
189 	return &regs->bx + (regno >> 2);
190 }
191 
192 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
193 {
194 	/*
195 	 * Returning the value truncates it to 16 bits.
196 	 */
197 	unsigned int retval;
198 	if (offset != offsetof(struct user_regs_struct, gs))
199 		retval = *pt_regs_access(task_pt_regs(task), offset);
200 	else {
201 		if (task == current)
202 			retval = get_user_gs(task_pt_regs(task));
203 		else
204 			retval = task_user_gs(task);
205 	}
206 	return retval;
207 }
208 
209 static int set_segment_reg(struct task_struct *task,
210 			   unsigned long offset, u16 value)
211 {
212 	/*
213 	 * The value argument was already truncated to 16 bits.
214 	 */
215 	if (invalid_selector(value))
216 		return -EIO;
217 
218 	/*
219 	 * For %cs and %ss we cannot permit a null selector.
220 	 * We can permit a bogus selector as long as it has USER_RPL.
221 	 * Null selectors are fine for other segment registers, but
222 	 * we will never get back to user mode with invalid %cs or %ss
223 	 * and will take the trap in iret instead.  Much code relies
224 	 * on user_mode() to distinguish a user trap frame (which can
225 	 * safely use invalid selectors) from a kernel trap frame.
226 	 */
227 	switch (offset) {
228 	case offsetof(struct user_regs_struct, cs):
229 	case offsetof(struct user_regs_struct, ss):
230 		if (unlikely(value == 0))
231 			return -EIO;
232 
233 	default:
234 		*pt_regs_access(task_pt_regs(task), offset) = value;
235 		break;
236 
237 	case offsetof(struct user_regs_struct, gs):
238 		if (task == current)
239 			set_user_gs(task_pt_regs(task), value);
240 		else
241 			task_user_gs(task) = value;
242 	}
243 
244 	return 0;
245 }
246 
247 #else  /* CONFIG_X86_64 */
248 
249 #define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)
250 
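/*
 * On 64-bit the generic-register layout of struct user_regs_struct matches
 * struct pt_regs, so a byte offset maps directly onto a pt_regs slot.
 */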
251 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
252 {
253 	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
254 	return &regs->r15 + (offset / sizeof(regs->r15));
255 }
256 
257 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
258 {
259 	/*
260 	 * Returning the value truncates it to 16 bits.
261 	 */
262 	unsigned int seg;
263 
264 	switch (offset) {
265 	case offsetof(struct user_regs_struct, fs):
266 		if (task == current) {
267 			/* Older gas can't assemble movq %?s,%r?? */
268 			asm("movl %%fs,%0" : "=r" (seg));
269 			return seg;
270 		}
271 		return task->thread.fsindex;
272 	case offsetof(struct user_regs_struct, gs):
273 		if (task == current) {
274 			asm("movl %%gs,%0" : "=r" (seg));
275 			return seg;
276 		}
277 		return task->thread.gsindex;
278 	case offsetof(struct user_regs_struct, ds):
279 		if (task == current) {
280 			asm("movl %%ds,%0" : "=r" (seg));
281 			return seg;
282 		}
283 		return task->thread.ds;
284 	case offsetof(struct user_regs_struct, es):
285 		if (task == current) {
286 			asm("movl %%es,%0" : "=r" (seg));
287 			return seg;
288 		}
289 		return task->thread.es;
290 
291 	case offsetof(struct user_regs_struct, cs):
292 	case offsetof(struct user_regs_struct, ss):
293 		break;
294 	}
295 	return *pt_regs_access(task_pt_regs(task), offset);
296 }
297 
298 static int set_segment_reg(struct task_struct *task,
299 			   unsigned long offset, u16 value)
300 {
301 	/*
302 	 * The value argument was already truncated to 16 bits.
303 	 */
304 	if (invalid_selector(value))
305 		return -EIO;
306 
307 	switch (offset) {
308 	case offsetof(struct user_regs_struct,fs):
309 		task->thread.fsindex = value;
310 		if (task == current)
311 			loadsegment(fs, task->thread.fsindex);
312 		break;
313 	case offsetof(struct user_regs_struct,gs):
314 		task->thread.gsindex = value;
315 		if (task == current)
316 			load_gs_index(task->thread.gsindex);
317 		break;
318 	case offsetof(struct user_regs_struct,ds):
319 		task->thread.ds = value;
320 		if (task == current)
321 			loadsegment(ds, task->thread.ds);
322 		break;
323 	case offsetof(struct user_regs_struct,es):
324 		task->thread.es = value;
325 		if (task == current)
326 			loadsegment(es, task->thread.es);
327 		break;
328 
329 		/*
330 		 * Can't actually change these in 64-bit mode.
331 		 */
332 	case offsetof(struct user_regs_struct,cs):
333 		if (unlikely(value == 0))
334 			return -EIO;
335 		task_pt_regs(task)->cs = value;
336 		break;
337 	case offsetof(struct user_regs_struct,ss):
338 		if (unlikely(value == 0))
339 			return -EIO;
340 		task_pt_regs(task)->ss = value;
341 		break;
342 	}
343 
344 	return 0;
345 }
346 
347 #endif	/* CONFIG_X86_32 */
348 
349 static unsigned long get_flags(struct task_struct *task)
350 {
351 	unsigned long retval = task_pt_regs(task)->flags;
352 
353 	/*
354 	 * If the debugger set TF, hide it from the readout.
355 	 */
356 	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
357 		retval &= ~X86_EFLAGS_TF;
358 
359 	return retval;
360 }
361 
362 static int set_flags(struct task_struct *task, unsigned long value)
363 {
364 	struct pt_regs *regs = task_pt_regs(task);
365 
366 	/*
367 	 * If the user value contains TF, mark that
368 	 * it was not "us" (the debugger) that set it.
369 	 * If not, make sure it stays set if we had.
370 	 */
371 	if (value & X86_EFLAGS_TF)
372 		clear_tsk_thread_flag(task, TIF_FORCED_TF);
373 	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
374 		value |= X86_EFLAGS_TF;
375 
376 	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
377 
378 	return 0;
379 }
380 
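/*
 * Store one register value into the tracee, addressed by its byte offset
 * in struct user_regs_struct.  Segment registers, eflags and (on 64-bit)
 * the fs/gs bases need special treatment; everything else goes straight
 * into pt_regs.
 */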
381 static int putreg(struct task_struct *child,
382 		  unsigned long offset, unsigned long value)
383 {
384 	switch (offset) {
385 	case offsetof(struct user_regs_struct, cs):
386 	case offsetof(struct user_regs_struct, ds):
387 	case offsetof(struct user_regs_struct, es):
388 	case offsetof(struct user_regs_struct, fs):
389 	case offsetof(struct user_regs_struct, gs):
390 	case offsetof(struct user_regs_struct, ss):
391 		return set_segment_reg(child, offset, value);
392 
393 	case offsetof(struct user_regs_struct, flags):
394 		return set_flags(child, value);
395 
396 #ifdef CONFIG_X86_64
397 	case offsetof(struct user_regs_struct,fs_base):
398 		if (value >= TASK_SIZE_MAX)
399 			return -EIO;
400 		/*
401 		 * When changing the FS base, use do_arch_prctl_64()
402 		 * to set the index to zero and to set the base
403 		 * as requested.
404 		 */
405 		if (child->thread.fsbase != value)
406 			return do_arch_prctl_64(child, ARCH_SET_FS, value);
407 		return 0;
408 	case offsetof(struct user_regs_struct,gs_base):
409 		/*
410 		 * Exactly the same here as the %fs handling above.
411 		 */
412 		if (value >= TASK_SIZE_MAX)
413 			return -EIO;
414 		if (child->thread.gsbase != value)
415 			return do_arch_prctl_64(child, ARCH_SET_GS, value);
416 		return 0;
417 #endif
418 	}
419 
420 	*pt_regs_access(task_pt_regs(child), offset) = value;
421 	return 0;
422 }
423 
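/* Counterpart of putreg(): read one register by its byte offset. */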
424 static unsigned long getreg(struct task_struct *task, unsigned long offset)
425 {
426 	switch (offset) {
427 	case offsetof(struct user_regs_struct, cs):
428 	case offsetof(struct user_regs_struct, ds):
429 	case offsetof(struct user_regs_struct, es):
430 	case offsetof(struct user_regs_struct, fs):
431 	case offsetof(struct user_regs_struct, gs):
432 	case offsetof(struct user_regs_struct, ss):
433 		return get_segment_reg(task, offset);
434 
435 	case offsetof(struct user_regs_struct, flags):
436 		return get_flags(task);
437 
438 #ifdef CONFIG_X86_64
439 	case offsetof(struct user_regs_struct, fs_base):
440 		return x86_fsbase_read_task(task);
441 	case offsetof(struct user_regs_struct, gs_base):
442 		return x86_gsbase_read_task(task);
443 #endif
444 	}
445 
446 	return *pt_regs_access(task_pt_regs(task), offset);
447 }
448 
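/*
 * regset get/set callbacks for the general registers (NT_PRSTATUS):
 * copy the area one word at a time through getreg()/putreg().
 */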
449 static int genregs_get(struct task_struct *target,
450 		       const struct user_regset *regset,
451 		       unsigned int pos, unsigned int count,
452 		       void *kbuf, void __user *ubuf)
453 {
454 	if (kbuf) {
455 		unsigned long *k = kbuf;
456 		while (count >= sizeof(*k)) {
457 			*k++ = getreg(target, pos);
458 			count -= sizeof(*k);
459 			pos += sizeof(*k);
460 		}
461 	} else {
462 		unsigned long __user *u = ubuf;
463 		while (count >= sizeof(*u)) {
464 			if (__put_user(getreg(target, pos), u++))
465 				return -EFAULT;
466 			count -= sizeof(*u);
467 			pos += sizeof(*u);
468 		}
469 	}
470 
471 	return 0;
472 }
473 
474 static int genregs_set(struct task_struct *target,
475 		       const struct user_regset *regset,
476 		       unsigned int pos, unsigned int count,
477 		       const void *kbuf, const void __user *ubuf)
478 {
479 	int ret = 0;
480 	if (kbuf) {
481 		const unsigned long *k = kbuf;
482 		while (count >= sizeof(*k) && !ret) {
483 			ret = putreg(target, pos, *k++);
484 			count -= sizeof(*k);
485 			pos += sizeof(*k);
486 		}
487 	} else {
488 		const unsigned long  __user *u = ubuf;
489 		while (count >= sizeof(*u) && !ret) {
490 			unsigned long word;
491 			ret = __get_user(word, u++);
492 			if (ret)
493 				break;
494 			ret = putreg(target, pos, word);
495 			count -= sizeof(*u);
496 			pos += sizeof(*u);
497 		}
498 	}
499 	return ret;
500 }
501 
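/*
 * perf overflow callback attached to ptrace hardware breakpoints; it
 * records which breakpoint fired in the thread's virtual DR6 so the
 * debugger can read it back.
 */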
502 static void ptrace_triggered(struct perf_event *bp,
503 			     struct perf_sample_data *data,
504 			     struct pt_regs *regs)
505 {
506 	int i;
507 	struct thread_struct *thread = &(current->thread);
508 
509 	/*
510 	 * Store in the virtual DR6 register the fact that the breakpoint
511 	 * was hit so the thread's debugger will see it.
512 	 */
513 	for (i = 0; i < HBP_NUM; i++) {
514 		if (thread->ptrace_bps[i] == bp)
515 			break;
516 	}
517 
518 	thread->debugreg6 |= (DR_TRAP0 << i);
519 }
520 
521 /*
522  * Walk through every ptrace breakpoint for this thread and
523  * build the dr7 value on top of their attributes.
525  */
526 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
527 {
528 	int i;
529 	int dr7 = 0;
530 	struct arch_hw_breakpoint *info;
531 
532 	for (i = 0; i < HBP_NUM; i++) {
533 		if (bp[i] && !bp[i]->attr.disabled) {
534 			info = counter_arch_bp(bp[i]);
535 			dr7 |= encode_dr7(i, info->len, info->type);
536 		}
537 	}
538 
539 	return dr7;
540 }
541 
542 static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
543 					int len, int type, bool disabled)
544 {
545 	int err, bp_len, bp_type;
546 
547 	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
548 	if (!err) {
549 		attr->bp_len = bp_len;
550 		attr->bp_type = bp_type;
551 		attr->disabled = disabled;
552 	}
553 
554 	return err;
555 }
556 
557 static struct perf_event *
558 ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
559 				unsigned long addr, bool disabled)
560 {
561 	struct perf_event_attr attr;
562 	int err;
563 
564 	ptrace_breakpoint_init(&attr);
565 	attr.bp_addr = addr;
566 
567 	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
568 	if (err)
569 		return ERR_PTR(err);
570 
571 	return register_user_hw_breakpoint(&attr, ptrace_triggered,
572 						 NULL, tsk);
573 }
574 
575 static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
576 					int disabled)
577 {
578 	struct perf_event_attr attr = bp->attr;
579 	int err;
580 
581 	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
582 	if (err)
583 		return err;
584 
585 	return modify_user_hw_breakpoint(bp, &attr);
586 }
587 
588 /*
589  * Handle ptrace writes to debug register 7.
590  */
591 static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
592 {
593 	struct thread_struct *thread = &tsk->thread;
594 	unsigned long old_dr7;
595 	bool second_pass = false;
596 	int i, rc, ret = 0;
597 
598 	data &= ~DR_CONTROL_RESERVED;
599 	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
600 
601 restore:
602 	rc = 0;
603 	for (i = 0; i < HBP_NUM; i++) {
604 		unsigned len, type;
605 		bool disabled = !decode_dr7(data, i, &len, &type);
606 		struct perf_event *bp = thread->ptrace_bps[i];
607 
608 		if (!bp) {
609 			if (disabled)
610 				continue;
611 
612 			bp = ptrace_register_breakpoint(tsk,
613 					len, type, 0, disabled);
614 			if (IS_ERR(bp)) {
615 				rc = PTR_ERR(bp);
616 				break;
617 			}
618 
619 			thread->ptrace_bps[i] = bp;
620 			continue;
621 		}
622 
623 		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
624 		if (rc)
625 			break;
626 	}
627 
628 	/* Restore if the first pass failed, second_pass shouldn't fail. */
629 	if (rc && !WARN_ON(second_pass)) {
630 		ret = rc;
631 		data = old_dr7;
632 		second_pass = true;
633 		goto restore;
634 	}
635 
636 	return ret;
637 }
638 
639 /*
640  * Handle PTRACE_PEEKUSR calls for the debug register area.
641  */
642 static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
643 {
644 	struct thread_struct *thread = &tsk->thread;
645 	unsigned long val = 0;
646 
647 	if (n < HBP_NUM) {
648 		struct perf_event *bp = thread->ptrace_bps[n];
649 
650 		if (bp)
651 			val = bp->hw.info.address;
652 	} else if (n == 6) {
653 		val = thread->debugreg6;
654 	} else if (n == 7) {
655 		val = thread->ptrace_dr7;
656 	}
657 	return val;
658 }
659 
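/*
 * Handle a PTRACE_POKEUSR write to one of DR0..DR3: reprogram the address
 * of the corresponding breakpoint, creating a disabled placeholder event
 * if none exists yet.
 */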
660 static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
661 				      unsigned long addr)
662 {
663 	struct thread_struct *t = &tsk->thread;
664 	struct perf_event *bp = t->ptrace_bps[nr];
665 	int err = 0;
666 
667 	if (!bp) {
668 		/*
669 		 * Use stub len and type to create an inactive but correct bp.
670 		 *
671 		 * CHECKME: the previous code returned -EIO if the addr wasn't
672 		 * a valid task virtual address; the new one returns -EINVAL in
673 		 * this case.
674 		 * -EINVAL may be what we want for in-kernel breakpoint users,
675 		 * but -EIO looks better for ptrace, since we refuse a register
676 		 * write for the user.  And anyway this is the previous
677 		 * behaviour.
678 		 */
679 		bp = ptrace_register_breakpoint(tsk,
680 				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
681 				addr, true);
682 		if (IS_ERR(bp))
683 			err = PTR_ERR(bp);
684 		else
685 			t->ptrace_bps[nr] = bp;
686 	} else {
687 		struct perf_event_attr attr = bp->attr;
688 
689 		attr.bp_addr = addr;
690 		err = modify_user_hw_breakpoint(bp, &attr);
691 	}
692 
693 	return err;
694 }
695 
696 /*
697  * Handle PTRACE_POKEUSR calls for the debug register area.
698  */
699 static int ptrace_set_debugreg(struct task_struct *tsk, int n,
700 			       unsigned long val)
701 {
702 	struct thread_struct *thread = &tsk->thread;
703 	/* There are no DR4 or DR5 registers */
704 	int rc = -EIO;
705 
706 	if (n < HBP_NUM) {
707 		rc = ptrace_set_breakpoint_addr(tsk, n, val);
708 	} else if (n == 6) {
709 		thread->debugreg6 = val;
710 		rc = 0;
711 	} else if (n == 7) {
712 		rc = ptrace_write_dr7(tsk, val);
713 		if (!rc)
714 			thread->ptrace_dr7 = val;
715 	}
716 	return rc;
717 }
718 
719 /*
720  * These access the current or another (stopped) task's io permission
721  * bitmap for debugging or core dump.
722  */
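/* Number of regset-sized units covering the part of the bitmap in use. */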
723 static int ioperm_active(struct task_struct *target,
724 			 const struct user_regset *regset)
725 {
726 	return target->thread.io_bitmap_max / regset->size;
727 }
728 
729 static int ioperm_get(struct task_struct *target,
730 		      const struct user_regset *regset,
731 		      unsigned int pos, unsigned int count,
732 		      void *kbuf, void __user *ubuf)
733 {
734 	if (!target->thread.io_bitmap_ptr)
735 		return -ENXIO;
736 
737 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
738 				   target->thread.io_bitmap_ptr,
739 				   0, IO_BITMAP_BYTES);
740 }
741 
742 /*
743  * Called by kernel/ptrace.c when detaching..
744  *
745  * Make sure the single step bit is not set.
746  */
747 void ptrace_disable(struct task_struct *child)
748 {
749 	user_disable_single_step(child);
750 #ifdef TIF_SYSCALL_EMU
751 	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
752 #endif
753 }
754 
755 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
756 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
757 #endif
758 
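/*
 * Native ptrace entry point: handle the x86-specific requests here and
 * hand everything else to the generic ptrace_request().
 */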
759 long arch_ptrace(struct task_struct *child, long request,
760 		 unsigned long addr, unsigned long data)
761 {
762 	int ret;
763 	unsigned long __user *datap = (unsigned long __user *)data;
764 
765 	switch (request) {
766 	/* read the word at location addr in the USER area. */
767 	case PTRACE_PEEKUSR: {
768 		unsigned long tmp;
769 
770 		ret = -EIO;
771 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
772 			break;
773 
774 		tmp = 0;  /* Default return condition */
775 		if (addr < sizeof(struct user_regs_struct))
776 			tmp = getreg(child, addr);
777 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
778 			 addr <= offsetof(struct user, u_debugreg[7])) {
779 			addr -= offsetof(struct user, u_debugreg[0]);
780 			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
781 		}
782 		ret = put_user(tmp, datap);
783 		break;
784 	}
785 
786 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
787 		ret = -EIO;
788 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
789 			break;
790 
791 		if (addr < sizeof(struct user_regs_struct))
792 			ret = putreg(child, addr, data);
793 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
794 			 addr <= offsetof(struct user, u_debugreg[7])) {
795 			addr -= offsetof(struct user, u_debugreg[0]);
796 			ret = ptrace_set_debugreg(child,
797 						  addr / sizeof(data), data);
798 		}
799 		break;
800 
801 	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
802 		return copy_regset_to_user(child,
803 					   task_user_regset_view(current),
804 					   REGSET_GENERAL,
805 					   0, sizeof(struct user_regs_struct),
806 					   datap);
807 
808 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
809 		return copy_regset_from_user(child,
810 					     task_user_regset_view(current),
811 					     REGSET_GENERAL,
812 					     0, sizeof(struct user_regs_struct),
813 					     datap);
814 
815 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
816 		return copy_regset_to_user(child,
817 					   task_user_regset_view(current),
818 					   REGSET_FP,
819 					   0, sizeof(struct user_i387_struct),
820 					   datap);
821 
822 	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
823 		return copy_regset_from_user(child,
824 					     task_user_regset_view(current),
825 					     REGSET_FP,
826 					     0, sizeof(struct user_i387_struct),
827 					     datap);
828 
829 #ifdef CONFIG_X86_32
830 	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
831 		return copy_regset_to_user(child, &user_x86_32_view,
832 					   REGSET_XFP,
833 					   0, sizeof(struct user_fxsr_struct),
834 					   datap) ? -EIO : 0;
835 
836 	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
837 		return copy_regset_from_user(child, &user_x86_32_view,
838 					     REGSET_XFP,
839 					     0, sizeof(struct user_fxsr_struct),
840 					     datap) ? -EIO : 0;
841 #endif
842 
843 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
844 	case PTRACE_GET_THREAD_AREA:
845 		if ((int) addr < 0)
846 			return -EIO;
847 		ret = do_get_thread_area(child, addr,
848 					(struct user_desc __user *)data);
849 		break;
850 
851 	case PTRACE_SET_THREAD_AREA:
852 		if ((int) addr < 0)
853 			return -EIO;
854 		ret = do_set_thread_area(child, addr,
855 					(struct user_desc __user *)data, 0);
856 		break;
857 #endif
858 
859 #ifdef CONFIG_X86_64
860 		/* normal 64bit interface to access TLS data.
861 		   Works just like arch_prctl, except that the arguments
862 		   are reversed. */
863 	case PTRACE_ARCH_PRCTL:
864 		ret = do_arch_prctl_64(child, data, addr);
865 		break;
866 #endif
867 
868 	default:
869 		ret = ptrace_request(child, request, addr, data);
870 		break;
871 	}
872 
873 	return ret;
874 }
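/*
 * Illustrative sketch only (user space, glibc naming assumed; not part of
 * this file): a debugger reaches the PTRACE_PEEKUSR case above roughly as
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *	#include <stddef.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSER, pid,
 *			 offsetof(struct user_regs_struct, rip), 0);
 *	if (ip == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 *
 * with the offset ending up in getreg() above.
 */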
875 
876 #ifdef CONFIG_IA32_EMULATION
877 
878 #include <linux/compat.h>
879 #include <linux/syscalls.h>
880 #include <asm/ia32.h>
881 #include <asm/user32.h>
882 
883 #define R32(l,q)							\
884 	case offsetof(struct user32, regs.l):				\
885 		regs->q = value; break
886 
887 #define SEG32(rs)							\
888 	case offsetof(struct user32, regs.rs):				\
889 		return set_segment_reg(child,				\
890 				       offsetof(struct user_regs_struct, rs), \
891 				       value);				\
892 		break
893 
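/*
 * Write one register of an ia32 tracee, addressed by its offset in
 * struct user32.
 */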
894 static int putreg32(struct task_struct *child, unsigned regno, u32 value)
895 {
896 	struct pt_regs *regs = task_pt_regs(child);
897 
898 	switch (regno) {
899 
900 	SEG32(cs);
901 	SEG32(ds);
902 	SEG32(es);
903 	SEG32(fs);
904 	SEG32(gs);
905 	SEG32(ss);
906 
907 	R32(ebx, bx);
908 	R32(ecx, cx);
909 	R32(edx, dx);
910 	R32(edi, di);
911 	R32(esi, si);
912 	R32(ebp, bp);
913 	R32(eax, ax);
914 	R32(eip, ip);
915 	R32(esp, sp);
916 
917 	case offsetof(struct user32, regs.orig_eax):
918 		/*
919 		 * Warning: bizarre corner case fixup here.  A 32-bit
920 		 * debugger setting orig_eax to -1 wants to disable
921 		 * syscall restart.  Make sure that the syscall
922 		 * restart code sign-extends orig_ax.  Also make sure
923 		 * we interpret the -ERESTART* codes correctly if
924 		 * loaded into regs->ax in case the task is not
925 		 * actually still sitting at the exit from a 32-bit
926 		 * syscall with TS_COMPAT still set.
927 		 */
928 		regs->orig_ax = value;
929 		if (syscall_get_nr(child, regs) >= 0)
930 			child->thread_info.status |= TS_I386_REGS_POKED;
931 		break;
932 
933 	case offsetof(struct user32, regs.eflags):
934 		return set_flags(child, value);
935 
936 	case offsetof(struct user32, u_debugreg[0]) ...
937 		offsetof(struct user32, u_debugreg[7]):
938 		regno -= offsetof(struct user32, u_debugreg[0]);
939 		return ptrace_set_debugreg(child, regno / 4, value);
940 
941 	default:
942 		if (regno > sizeof(struct user32) || (regno & 3))
943 			return -EIO;
944 
945 		/*
946 		 * Other dummy fields in the virtual user structure
947 		 * are ignored
948 		 */
949 		break;
950 	}
951 	return 0;
952 }
953 
954 #undef R32
955 #undef SEG32
956 
957 #define R32(l,q)							\
958 	case offsetof(struct user32, regs.l):				\
959 		*val = regs->q; break
960 
961 #define SEG32(rs)							\
962 	case offsetof(struct user32, regs.rs):				\
963 		*val = get_segment_reg(child,				\
964 				       offsetof(struct user_regs_struct, rs)); \
965 		break
966 
967 static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
968 {
969 	struct pt_regs *regs = task_pt_regs(child);
970 
971 	switch (regno) {
972 
973 	SEG32(ds);
974 	SEG32(es);
975 	SEG32(fs);
976 	SEG32(gs);
977 
978 	R32(cs, cs);
979 	R32(ss, ss);
980 	R32(ebx, bx);
981 	R32(ecx, cx);
982 	R32(edx, dx);
983 	R32(edi, di);
984 	R32(esi, si);
985 	R32(ebp, bp);
986 	R32(eax, ax);
987 	R32(orig_eax, orig_ax);
988 	R32(eip, ip);
989 	R32(esp, sp);
990 
991 	case offsetof(struct user32, regs.eflags):
992 		*val = get_flags(child);
993 		break;
994 
995 	case offsetof(struct user32, u_debugreg[0]) ...
996 		offsetof(struct user32, u_debugreg[7]):
997 		regno -= offsetof(struct user32, u_debugreg[0]);
998 		*val = ptrace_get_debugreg(child, regno / 4);
999 		break;
1000 
1001 	default:
1002 		if (regno > sizeof(struct user32) || (regno & 3))
1003 			return -EIO;
1004 
1005 		/*
1006 		 * Other dummy fields in the virtual user structure
1007 		 * are ignored
1008 		 */
1009 		*val = 0;
1010 		break;
1011 	}
1012 	return 0;
1013 }
1014 
1015 #undef R32
1016 #undef SEG32
1017 
1018 static int genregs32_get(struct task_struct *target,
1019 			 const struct user_regset *regset,
1020 			 unsigned int pos, unsigned int count,
1021 			 void *kbuf, void __user *ubuf)
1022 {
1023 	if (kbuf) {
1024 		compat_ulong_t *k = kbuf;
1025 		while (count >= sizeof(*k)) {
1026 			getreg32(target, pos, k++);
1027 			count -= sizeof(*k);
1028 			pos += sizeof(*k);
1029 		}
1030 	} else {
1031 		compat_ulong_t __user *u = ubuf;
1032 		while (count >= sizeof(*u)) {
1033 			compat_ulong_t word;
1034 			getreg32(target, pos, &word);
1035 			if (__put_user(word, u++))
1036 				return -EFAULT;
1037 			count -= sizeof(*u);
1038 			pos += sizeof(*u);
1039 		}
1040 	}
1041 
1042 	return 0;
1043 }
1044 
1045 static int genregs32_set(struct task_struct *target,
1046 			 const struct user_regset *regset,
1047 			 unsigned int pos, unsigned int count,
1048 			 const void *kbuf, const void __user *ubuf)
1049 {
1050 	int ret = 0;
1051 	if (kbuf) {
1052 		const compat_ulong_t *k = kbuf;
1053 		while (count >= sizeof(*k) && !ret) {
1054 			ret = putreg32(target, pos, *k++);
1055 			count -= sizeof(*k);
1056 			pos += sizeof(*k);
1057 		}
1058 	} else {
1059 		const compat_ulong_t __user *u = ubuf;
1060 		while (count >= sizeof(*u) && !ret) {
1061 			compat_ulong_t word;
1062 			ret = __get_user(word, u++);
1063 			if (ret)
1064 				break;
1065 			ret = putreg32(target, pos, word);
1066 			count -= sizeof(*u);
1067 			pos += sizeof(*u);
1068 		}
1069 	}
1070 	return ret;
1071 }
1072 
1073 static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
1074 			     compat_ulong_t caddr, compat_ulong_t cdata)
1075 {
1076 	unsigned long addr = caddr;
1077 	unsigned long data = cdata;
1078 	void __user *datap = compat_ptr(data);
1079 	int ret;
1080 	__u32 val;
1081 
1082 	switch (request) {
1083 	case PTRACE_PEEKUSR:
1084 		ret = getreg32(child, addr, &val);
1085 		if (ret == 0)
1086 			ret = put_user(val, (__u32 __user *)datap);
1087 		break;
1088 
1089 	case PTRACE_POKEUSR:
1090 		ret = putreg32(child, addr, data);
1091 		break;
1092 
1093 	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
1094 		return copy_regset_to_user(child, &user_x86_32_view,
1095 					   REGSET_GENERAL,
1096 					   0, sizeof(struct user_regs_struct32),
1097 					   datap);
1098 
1099 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
1100 		return copy_regset_from_user(child, &user_x86_32_view,
1101 					     REGSET_GENERAL, 0,
1102 					     sizeof(struct user_regs_struct32),
1103 					     datap);
1104 
1105 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
1106 		return copy_regset_to_user(child, &user_x86_32_view,
1107 					   REGSET_FP, 0,
1108 					   sizeof(struct user_i387_ia32_struct),
1109 					   datap);
1110 
1111 	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
1112 		return copy_regset_from_user(
1113 			child, &user_x86_32_view, REGSET_FP,
1114 			0, sizeof(struct user_i387_ia32_struct), datap);
1115 
1116 	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
1117 		return copy_regset_to_user(child, &user_x86_32_view,
1118 					   REGSET_XFP, 0,
1119 					   sizeof(struct user32_fxsr_struct),
1120 					   datap);
1121 
1122 	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
1123 		return copy_regset_from_user(child, &user_x86_32_view,
1124 					     REGSET_XFP, 0,
1125 					     sizeof(struct user32_fxsr_struct),
1126 					     datap);
1127 
1128 	case PTRACE_GET_THREAD_AREA:
1129 	case PTRACE_SET_THREAD_AREA:
1130 		return arch_ptrace(child, request, addr, data);
1131 
1132 	default:
1133 		return compat_ptrace_request(child, request, addr, data);
1134 	}
1135 
1136 	return ret;
1137 }
1138 #endif /* CONFIG_IA32_EMULATION */
1139 
1140 #ifdef CONFIG_X86_X32_ABI
1141 static long x32_arch_ptrace(struct task_struct *child,
1142 			    compat_long_t request, compat_ulong_t caddr,
1143 			    compat_ulong_t cdata)
1144 {
1145 	unsigned long addr = caddr;
1146 	unsigned long data = cdata;
1147 	void __user *datap = compat_ptr(data);
1148 	int ret;
1149 
1150 	switch (request) {
1151 	/* Read 32 bits at location addr in the USER area.  Only the lower
1152 	   32 bits of segment and debug registers are returned.  */
1153 	case PTRACE_PEEKUSR: {
1154 		u32 tmp;
1155 
1156 		ret = -EIO;
1157 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1158 		    addr < offsetof(struct user_regs_struct, cs))
1159 			break;
1160 
1161 		tmp = 0;  /* Default return condition */
1162 		if (addr < sizeof(struct user_regs_struct))
1163 			tmp = getreg(child, addr);
1164 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1165 			 addr <= offsetof(struct user, u_debugreg[7])) {
1166 			addr -= offsetof(struct user, u_debugreg[0]);
1167 			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
1168 		}
1169 		ret = put_user(tmp, (__u32 __user *)datap);
1170 		break;
1171 	}
1172 
1173 	/* Write the word at location addr in the USER area.  Segment and
1174 	   debug registers can only be updated with the value zero-extended
1175 	   from 32 bits. */
1176 	case PTRACE_POKEUSR:
1177 		ret = -EIO;
1178 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1179 		    addr < offsetof(struct user_regs_struct, cs))
1180 			break;
1181 
1182 		if (addr < sizeof(struct user_regs_struct))
1183 			ret = putreg(child, addr, data);
1184 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1185 			 addr <= offsetof(struct user, u_debugreg[7])) {
1186 			addr -= offsetof(struct user, u_debugreg[0]);
1187 			ret = ptrace_set_debugreg(child,
1188 						  addr / sizeof(data), data);
1189 		}
1190 		break;
1191 
1192 	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
1193 		return copy_regset_to_user(child,
1194 					   task_user_regset_view(current),
1195 					   REGSET_GENERAL,
1196 					   0, sizeof(struct user_regs_struct),
1197 					   datap);
1198 
1199 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
1200 		return copy_regset_from_user(child,
1201 					     task_user_regset_view(current),
1202 					     REGSET_GENERAL,
1203 					     0, sizeof(struct user_regs_struct),
1204 					     datap);
1205 
1206 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
1207 		return copy_regset_to_user(child,
1208 					   task_user_regset_view(current),
1209 					   REGSET_FP,
1210 					   0, sizeof(struct user_i387_struct),
1211 					   datap);
1212 
1213 	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
1214 		return copy_regset_from_user(child,
1215 					     task_user_regset_view(current),
1216 					     REGSET_FP,
1217 					     0, sizeof(struct user_i387_struct),
1218 					     datap);
1219 
1220 	default:
1221 		return compat_ptrace_request(child, request, addr, data);
1222 	}
1223 
1224 	return ret;
1225 }
1226 #endif
1227 
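/*
 * Compat entry point: x32 callers (64-bit registers, 32-bit pointers) are
 * routed to x32_arch_ptrace(), plain ia32 callers to ia32_arch_ptrace().
 */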
1228 #ifdef CONFIG_COMPAT
1229 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1230 			compat_ulong_t caddr, compat_ulong_t cdata)
1231 {
1232 #ifdef CONFIG_X86_X32_ABI
1233 	if (!in_ia32_syscall())
1234 		return x32_arch_ptrace(child, request, caddr, cdata);
1235 #endif
1236 #ifdef CONFIG_IA32_EMULATION
1237 	return ia32_arch_ptrace(child, request, caddr, cdata);
1238 #else
1239 	return 0;
1240 #endif
1241 }
1242 #endif	/* CONFIG_COMPAT */
1243 
1244 #ifdef CONFIG_X86_64
1245 
1246 static struct user_regset x86_64_regsets[] __ro_after_init = {
1247 	[REGSET_GENERAL] = {
1248 		.core_note_type = NT_PRSTATUS,
1249 		.n = sizeof(struct user_regs_struct) / sizeof(long),
1250 		.size = sizeof(long), .align = sizeof(long),
1251 		.get = genregs_get, .set = genregs_set
1252 	},
1253 	[REGSET_FP] = {
1254 		.core_note_type = NT_PRFPREG,
1255 		.n = sizeof(struct user_i387_struct) / sizeof(long),
1256 		.size = sizeof(long), .align = sizeof(long),
1257 		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
1258 	},
1259 	[REGSET_XSTATE] = {
1260 		.core_note_type = NT_X86_XSTATE,
1261 		.size = sizeof(u64), .align = sizeof(u64),
1262 		.active = xstateregs_active, .get = xstateregs_get,
1263 		.set = xstateregs_set
1264 	},
1265 	[REGSET_IOPERM64] = {
1266 		.core_note_type = NT_386_IOPERM,
1267 		.n = IO_BITMAP_LONGS,
1268 		.size = sizeof(long), .align = sizeof(long),
1269 		.active = ioperm_active, .get = ioperm_get
1270 	},
1271 };
1272 
1273 static const struct user_regset_view user_x86_64_view = {
1274 	.name = "x86_64", .e_machine = EM_X86_64,
1275 	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
1276 };
1277 
1278 #else  /* CONFIG_X86_32 */
1279 
1280 #define user_regs_struct32	user_regs_struct
1281 #define genregs32_get		genregs_get
1282 #define genregs32_set		genregs_set
1283 
1284 #endif	/* CONFIG_X86_64 */
1285 
1286 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1287 static struct user_regset x86_32_regsets[] __ro_after_init = {
1288 	[REGSET_GENERAL] = {
1289 		.core_note_type = NT_PRSTATUS,
1290 		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
1291 		.size = sizeof(u32), .align = sizeof(u32),
1292 		.get = genregs32_get, .set = genregs32_set
1293 	},
1294 	[REGSET_FP] = {
1295 		.core_note_type = NT_PRFPREG,
1296 		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
1297 		.size = sizeof(u32), .align = sizeof(u32),
1298 		.active = regset_fpregs_active, .get = fpregs_get, .set = fpregs_set
1299 	},
1300 	[REGSET_XFP] = {
1301 		.core_note_type = NT_PRXFPREG,
1302 		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
1303 		.size = sizeof(u32), .align = sizeof(u32),
1304 		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
1305 	},
1306 	[REGSET_XSTATE] = {
1307 		.core_note_type = NT_X86_XSTATE,
1308 		.size = sizeof(u64), .align = sizeof(u64),
1309 		.active = xstateregs_active, .get = xstateregs_get,
1310 		.set = xstateregs_set
1311 	},
1312 	[REGSET_TLS] = {
1313 		.core_note_type = NT_386_TLS,
1314 		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
1315 		.size = sizeof(struct user_desc),
1316 		.align = sizeof(struct user_desc),
1317 		.active = regset_tls_active,
1318 		.get = regset_tls_get, .set = regset_tls_set
1319 	},
1320 	[REGSET_IOPERM32] = {
1321 		.core_note_type = NT_386_IOPERM,
1322 		.n = IO_BITMAP_BYTES / sizeof(u32),
1323 		.size = sizeof(u32), .align = sizeof(u32),
1324 		.active = ioperm_active, .get = ioperm_get
1325 	},
1326 };
1327 
1328 static const struct user_regset_view user_x86_32_view = {
1329 	.name = "i386", .e_machine = EM_386,
1330 	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
1331 };
1332 #endif
1333 
1334 /*
1335  * This represents bytes 464..511 in the memory layout exported through
1336  * the REGSET_XSTATE interface.
1337  */
1338 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
1339 
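/*
 * Called once during boot by the xstate setup code so that the XSTATE
 * regsets report the correct size and the exported XCR0 mask.
 */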
1340 void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
1341 {
1342 #ifdef CONFIG_X86_64
1343 	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
1344 #endif
1345 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1346 	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
1347 #endif
1348 	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
1349 }
1350 
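/*
 * Return the regset view matching @task's user-mode ABI: the i386 view
 * unless the task is currently executing 64-bit user code.
 */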
1351 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1352 {
1353 #ifdef CONFIG_IA32_EMULATION
1354 	if (!user_64bit_mode(task_pt_regs(task)))
1355 #endif
1356 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1357 		return &user_x86_32_view;
1358 #endif
1359 #ifdef CONFIG_X86_64
1360 	return &user_x86_64_view;
1361 #endif
1362 }
1363 
1364 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1365 					 int error_code, int si_code)
1366 {
1367 	tsk->thread.trap_nr = X86_TRAP_DB;
1368 	tsk->thread.error_code = error_code;
1369 
1370 	/* Send us the fake SIGTRAP */
1371 	force_sig_fault(SIGTRAP, si_code,
1372 			user_mode(regs) ? (void __user *)regs->ip : NULL, tsk);
1373 }
1374 
1375 void user_single_step_report(struct pt_regs *regs)
1376 {
1377 	send_sigtrap(current, regs, 0, TRAP_BRKPT);
1378 }
1379