xref: /linux/arch/x86/kernel/ptrace.c (revision a1c613ae4c322ddd58d5a8539dbfba2a0380a8c0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* By Ross Biro 1/23/92 */
3 /*
4  * Pentium III FXSR, SSE support
5  *	Gareth Hughes <gareth@valinux.com>, May 2000
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/sched.h>
10 #include <linux/sched/task_stack.h>
11 #include <linux/mm.h>
12 #include <linux/smp.h>
13 #include <linux/errno.h>
14 #include <linux/slab.h>
15 #include <linux/ptrace.h>
16 #include <linux/user.h>
17 #include <linux/elf.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/seccomp.h>
21 #include <linux/signal.h>
22 #include <linux/perf_event.h>
23 #include <linux/hw_breakpoint.h>
24 #include <linux/rcupdate.h>
25 #include <linux/export.h>
26 #include <linux/context_tracking.h>
27 #include <linux/nospec.h>
28 
29 #include <linux/uaccess.h>
30 #include <asm/processor.h>
31 #include <asm/fpu/signal.h>
32 #include <asm/fpu/regset.h>
33 #include <asm/fpu/xstate.h>
34 #include <asm/debugreg.h>
35 #include <asm/ldt.h>
36 #include <asm/desc.h>
37 #include <asm/prctl.h>
38 #include <asm/proto.h>
39 #include <asm/hw_breakpoint.h>
40 #include <asm/traps.h>
41 #include <asm/syscall.h>
42 #include <asm/fsgsbase.h>
43 #include <asm/io_bitmap.h>
44 
45 #include "tls.h"
46 
47 enum x86_regset_32 {
48 	REGSET32_GENERAL,
49 	REGSET32_FP,
50 	REGSET32_XFP,
51 	REGSET32_XSTATE,
52 	REGSET32_TLS,
53 	REGSET32_IOPERM,
54 };
55 
56 enum x86_regset_64 {
57 	REGSET64_GENERAL,
58 	REGSET64_FP,
59 	REGSET64_IOPERM,
60 	REGSET64_XSTATE,
61 	REGSET64_SSP,
62 };
63 
64 #define REGSET_GENERAL \
65 ({ \
66 	BUILD_BUG_ON((int)REGSET32_GENERAL != (int)REGSET64_GENERAL); \
67 	REGSET32_GENERAL; \
68 })
69 
70 #define REGSET_FP \
71 ({ \
72 	BUILD_BUG_ON((int)REGSET32_FP != (int)REGSET64_FP); \
73 	REGSET32_FP; \
74 })
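/*
 * REGSET_GENERAL and REGSET_FP are the indices used by the request
 * handlers below that must work against either regset view.  The
 * statement-expression form lets them act as plain constants while the
 * BUILD_BUG_ON()s guarantee at compile time that the 32-bit and 64-bit
 * enumerators still line up.
 */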
75 
76 
77 struct pt_regs_offset {
78 	const char *name;
79 	int offset;
80 };
81 
82 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
83 #define REG_OFFSET_END {.name = NULL, .offset = 0}
84 
85 static const struct pt_regs_offset regoffset_table[] = {
86 #ifdef CONFIG_X86_64
87 	REG_OFFSET_NAME(r15),
88 	REG_OFFSET_NAME(r14),
89 	REG_OFFSET_NAME(r13),
90 	REG_OFFSET_NAME(r12),
91 	REG_OFFSET_NAME(r11),
92 	REG_OFFSET_NAME(r10),
93 	REG_OFFSET_NAME(r9),
94 	REG_OFFSET_NAME(r8),
95 #endif
96 	REG_OFFSET_NAME(bx),
97 	REG_OFFSET_NAME(cx),
98 	REG_OFFSET_NAME(dx),
99 	REG_OFFSET_NAME(si),
100 	REG_OFFSET_NAME(di),
101 	REG_OFFSET_NAME(bp),
102 	REG_OFFSET_NAME(ax),
103 #ifdef CONFIG_X86_32
104 	REG_OFFSET_NAME(ds),
105 	REG_OFFSET_NAME(es),
106 	REG_OFFSET_NAME(fs),
107 	REG_OFFSET_NAME(gs),
108 #endif
109 	REG_OFFSET_NAME(orig_ax),
110 	REG_OFFSET_NAME(ip),
111 	REG_OFFSET_NAME(cs),
112 	REG_OFFSET_NAME(flags),
113 	REG_OFFSET_NAME(sp),
114 	REG_OFFSET_NAME(ss),
115 	REG_OFFSET_END,
116 };
117 
118 /**
119  * regs_query_register_offset() - query register offset from its name
120  * @name:	the name of a register
121  *
122  * regs_query_register_offset() returns the offset of a register in struct
123  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
124  */
125 int regs_query_register_offset(const char *name)
126 {
127 	const struct pt_regs_offset *roff;
128 	for (roff = regoffset_table; roff->name != NULL; roff++)
129 		if (!strcmp(roff->name, name))
130 			return roff->offset;
131 	return -EINVAL;
132 }
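/*
 * For illustration: given the table above, regs_query_register_offset("ip")
 * evaluates to offsetof(struct pt_regs, ip), while an unknown name yields
 * -EINVAL.  regs_query_register_name() below performs the inverse lookup.
 */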
133 
134 /**
135  * regs_query_register_name() - query register name from its offset
136  * @offset:	the offset of a register in struct pt_regs.
137  *
138  * regs_query_register_name() returns the name of a register from its
139  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
140  */
141 const char *regs_query_register_name(unsigned int offset)
142 {
143 	const struct pt_regs_offset *roff;
144 	for (roff = regoffset_table; roff->name != NULL; roff++)
145 		if (roff->offset == offset)
146 			return roff->name;
147 	return NULL;
148 }
149 
150 /*
151  * does not yet catch signals sent when the child dies.
152  * in exit.c or in signal.c.
153  */
154 
155 /*
156  * Determines which flags the user has access to [1 = access, 0 = no access].
157  */
158 #define FLAG_MASK_32		((unsigned long)			\
159 				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
160 				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
161 				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
162 				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
163 				  X86_EFLAGS_RF | X86_EFLAGS_AC))
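/*
 * Only the flags in FLAG_MASK are writable through ptrace: set_flags()
 * below merges the user-supplied value under this mask, so bits outside
 * it (IF, IOPL, VM, ...) always keep the task's existing values.
 */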
164 
165 /*
166  * Determines whether a value may be installed in a segment register.
167  */
168 static inline bool invalid_selector(u16 value)
169 {
170 	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
171 }
172 
173 #ifdef CONFIG_X86_32
174 
175 #define FLAG_MASK		FLAG_MASK_32
176 
177 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
178 {
179 	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
180 	return &regs->bx + (regno >> 2);
181 }
182 
183 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
184 {
185 	/*
186 	 * Returning the value truncates it to 16 bits.
187 	 */
188 	unsigned int retval;
189 	if (offset != offsetof(struct user_regs_struct, gs))
190 		retval = *pt_regs_access(task_pt_regs(task), offset);
191 	else {
192 		if (task == current)
193 			savesegment(gs, retval);
194 		else
195 			retval = task->thread.gs;
196 	}
197 	return retval;
198 }
199 
200 static int set_segment_reg(struct task_struct *task,
201 			   unsigned long offset, u16 value)
202 {
203 	if (WARN_ON_ONCE(task == current))
204 		return -EIO;
205 
206 	/*
207 	 * The value argument was already truncated to 16 bits.
208 	 */
209 	if (invalid_selector(value))
210 		return -EIO;
211 
212 	/*
213 	 * For %cs and %ss we cannot permit a null selector.
214 	 * We can permit a bogus selector as long as it has USER_RPL.
215 	 * Null selectors are fine for other segment registers, but
216 	 * we will never get back to user mode with invalid %cs or %ss
217 	 * and will take the trap in iret instead.  Much code relies
218 	 * on user_mode() to distinguish a user trap frame (which can
219 	 * safely use invalid selectors) from a kernel trap frame.
220 	 */
221 	switch (offset) {
222 	case offsetof(struct user_regs_struct, cs):
223 	case offsetof(struct user_regs_struct, ss):
224 		if (unlikely(value == 0))
225 			return -EIO;
226 		fallthrough;
227 
228 	default:
229 		*pt_regs_access(task_pt_regs(task), offset) = value;
230 		break;
231 
232 	case offsetof(struct user_regs_struct, gs):
233 		task->thread.gs = value;
234 	}
235 
236 	return 0;
237 }
238 
239 #else  /* CONFIG_X86_64 */
240 
241 #define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)
242 
243 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
244 {
245 	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
246 	return &regs->r15 + (offset / sizeof(regs->r15));
247 }
248 
249 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
250 {
251 	/*
252 	 * Returning the value truncates it to 16 bits.
253 	 */
254 	unsigned int seg;
255 
256 	switch (offset) {
257 	case offsetof(struct user_regs_struct, fs):
258 		if (task == current) {
259 			/* Older gas can't assemble movq %?s,%r?? */
260 			asm("movl %%fs,%0" : "=r" (seg));
261 			return seg;
262 		}
263 		return task->thread.fsindex;
264 	case offsetof(struct user_regs_struct, gs):
265 		if (task == current) {
266 			asm("movl %%gs,%0" : "=r" (seg));
267 			return seg;
268 		}
269 		return task->thread.gsindex;
270 	case offsetof(struct user_regs_struct, ds):
271 		if (task == current) {
272 			asm("movl %%ds,%0" : "=r" (seg));
273 			return seg;
274 		}
275 		return task->thread.ds;
276 	case offsetof(struct user_regs_struct, es):
277 		if (task == current) {
278 			asm("movl %%es,%0" : "=r" (seg));
279 			return seg;
280 		}
281 		return task->thread.es;
282 
283 	case offsetof(struct user_regs_struct, cs):
284 	case offsetof(struct user_regs_struct, ss):
285 		break;
286 	}
287 	return *pt_regs_access(task_pt_regs(task), offset);
288 }
289 
290 static int set_segment_reg(struct task_struct *task,
291 			   unsigned long offset, u16 value)
292 {
293 	if (WARN_ON_ONCE(task == current))
294 		return -EIO;
295 
296 	/*
297 	 * The value argument was already truncated to 16 bits.
298 	 */
299 	if (invalid_selector(value))
300 		return -EIO;
301 
302 	/*
303 	 * Writes to FS and GS will change the stored selector.  Whether
304 	 * this changes the segment base as well depends on whether
305 	 * FSGSBASE is enabled.
306 	 */
307 
308 	switch (offset) {
309 	case offsetof(struct user_regs_struct,fs):
310 		task->thread.fsindex = value;
311 		break;
312 	case offsetof(struct user_regs_struct,gs):
313 		task->thread.gsindex = value;
314 		break;
315 	case offsetof(struct user_regs_struct,ds):
316 		task->thread.ds = value;
317 		break;
318 	case offsetof(struct user_regs_struct,es):
319 		task->thread.es = value;
320 		break;
321 
322 		/*
323 		 * Can't actually change these in 64-bit mode.
324 		 */
325 	case offsetof(struct user_regs_struct,cs):
326 		if (unlikely(value == 0))
327 			return -EIO;
328 		task_pt_regs(task)->cs = value;
329 		break;
330 	case offsetof(struct user_regs_struct,ss):
331 		if (unlikely(value == 0))
332 			return -EIO;
333 		task_pt_regs(task)->ss = value;
334 		break;
335 	}
336 
337 	return 0;
338 }
339 
340 #endif	/* CONFIG_X86_32 */
341 
342 static unsigned long get_flags(struct task_struct *task)
343 {
344 	unsigned long retval = task_pt_regs(task)->flags;
345 
346 	/*
347 	 * If the debugger set TF, hide it from the readout.
348 	 */
349 	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
350 		retval &= ~X86_EFLAGS_TF;
351 
352 	return retval;
353 }
354 
355 static int set_flags(struct task_struct *task, unsigned long value)
356 {
357 	struct pt_regs *regs = task_pt_regs(task);
358 
359 	/*
360 	 * If the user value contains TF, mark that
361 	 * it was not "us" (the debugger) that set it.
362 	 * If not, make sure it stays set if we had.
363 	 */
364 	if (value & X86_EFLAGS_TF)
365 		clear_tsk_thread_flag(task, TIF_FORCED_TF);
366 	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
367 		value |= X86_EFLAGS_TF;
368 
369 	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
370 
371 	return 0;
372 }
373 
374 static int putreg(struct task_struct *child,
375 		  unsigned long offset, unsigned long value)
376 {
377 	switch (offset) {
378 	case offsetof(struct user_regs_struct, cs):
379 	case offsetof(struct user_regs_struct, ds):
380 	case offsetof(struct user_regs_struct, es):
381 	case offsetof(struct user_regs_struct, fs):
382 	case offsetof(struct user_regs_struct, gs):
383 	case offsetof(struct user_regs_struct, ss):
384 		return set_segment_reg(child, offset, value);
385 
386 	case offsetof(struct user_regs_struct, flags):
387 		return set_flags(child, value);
388 
389 #ifdef CONFIG_X86_64
390 	case offsetof(struct user_regs_struct,fs_base):
391 		if (value >= TASK_SIZE_MAX)
392 			return -EIO;
393 		x86_fsbase_write_task(child, value);
394 		return 0;
395 	case offsetof(struct user_regs_struct,gs_base):
396 		if (value >= TASK_SIZE_MAX)
397 			return -EIO;
398 		x86_gsbase_write_task(child, value);
399 		return 0;
400 #endif
401 	}
402 
403 	*pt_regs_access(task_pt_regs(child), offset) = value;
404 	return 0;
405 }
406 
407 static unsigned long getreg(struct task_struct *task, unsigned long offset)
408 {
409 	switch (offset) {
410 	case offsetof(struct user_regs_struct, cs):
411 	case offsetof(struct user_regs_struct, ds):
412 	case offsetof(struct user_regs_struct, es):
413 	case offsetof(struct user_regs_struct, fs):
414 	case offsetof(struct user_regs_struct, gs):
415 	case offsetof(struct user_regs_struct, ss):
416 		return get_segment_reg(task, offset);
417 
418 	case offsetof(struct user_regs_struct, flags):
419 		return get_flags(task);
420 
421 #ifdef CONFIG_X86_64
422 	case offsetof(struct user_regs_struct, fs_base):
423 		return x86_fsbase_read_task(task);
424 	case offsetof(struct user_regs_struct, gs_base):
425 		return x86_gsbase_read_task(task);
426 #endif
427 	}
428 
429 	return *pt_regs_access(task_pt_regs(task), offset);
430 }
431 
432 static int genregs_get(struct task_struct *target,
433 		       const struct user_regset *regset,
434 		       struct membuf to)
435 {
436 	int reg;
437 
438 	for (reg = 0; to.left; reg++)
439 		membuf_store(&to, getreg(target, reg * sizeof(unsigned long)));
440 	return 0;
441 }
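/*
 * Note: the regset core sizes the membuf from the regset's .n and .size,
 * so to.left counts the bytes still to be filled; each membuf_store()
 * consumes sizeof(unsigned long), i.e. one saved register per iteration.
 */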
442 
443 static int genregs_set(struct task_struct *target,
444 		       const struct user_regset *regset,
445 		       unsigned int pos, unsigned int count,
446 		       const void *kbuf, const void __user *ubuf)
447 {
448 	int ret = 0;
449 	if (kbuf) {
450 		const unsigned long *k = kbuf;
451 		while (count >= sizeof(*k) && !ret) {
452 			ret = putreg(target, pos, *k++);
453 			count -= sizeof(*k);
454 			pos += sizeof(*k);
455 		}
456 	} else {
457 		const unsigned long  __user *u = ubuf;
458 		while (count >= sizeof(*u) && !ret) {
459 			unsigned long word;
460 			ret = __get_user(word, u++);
461 			if (ret)
462 				break;
463 			ret = putreg(target, pos, word);
464 			count -= sizeof(*u);
465 			pos += sizeof(*u);
466 		}
467 	}
468 	return ret;
469 }
470 
471 static void ptrace_triggered(struct perf_event *bp,
472 			     struct perf_sample_data *data,
473 			     struct pt_regs *regs)
474 {
475 	int i;
476 	struct thread_struct *thread = &(current->thread);
477 
478 	/*
479 	 * Store in the virtual DR6 register the fact that the breakpoint
480 	 * was hit so the thread's debugger will see it.
481 	 */
482 	for (i = 0; i < HBP_NUM; i++) {
483 		if (thread->ptrace_bps[i] == bp)
484 			break;
485 	}
486 
487 	thread->virtual_dr6 |= (DR_TRAP0 << i);
488 }
489 
490 /*
491  * Walk through every ptrace breakpoint for this thread and
492  * build the dr7 value on top of their attributes.
493  *
494  */
495 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
496 {
497 	int i;
498 	int dr7 = 0;
499 	struct arch_hw_breakpoint *info;
500 
501 	for (i = 0; i < HBP_NUM; i++) {
502 		if (bp[i] && !bp[i]->attr.disabled) {
503 			info = counter_arch_bp(bp[i]);
504 			dr7 |= encode_dr7(i, info->len, info->type);
505 		}
506 	}
507 
508 	return dr7;
509 }
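/*
 * Layout reminder (architectural DR7): bits 0-7 carry the per-slot
 * local/global enable flags and bits 16-31 carry a 4-bit length/type
 * field per slot.  encode_dr7() packs each active perf breakpoint back
 * into that layout, so the value a debugger reads through ptrace is
 * consistent with the breakpoints actually installed.
 */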
510 
511 static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
512 					int len, int type, bool disabled)
513 {
514 	int err, bp_len, bp_type;
515 
516 	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
517 	if (!err) {
518 		attr->bp_len = bp_len;
519 		attr->bp_type = bp_type;
520 		attr->disabled = disabled;
521 	}
522 
523 	return err;
524 }
525 
526 static struct perf_event *
527 ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
528 				unsigned long addr, bool disabled)
529 {
530 	struct perf_event_attr attr;
531 	int err;
532 
533 	ptrace_breakpoint_init(&attr);
534 	attr.bp_addr = addr;
535 
536 	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
537 	if (err)
538 		return ERR_PTR(err);
539 
540 	return register_user_hw_breakpoint(&attr, ptrace_triggered,
541 						 NULL, tsk);
542 }
543 
544 static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
545 					int disabled)
546 {
547 	struct perf_event_attr attr = bp->attr;
548 	int err;
549 
550 	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
551 	if (err)
552 		return err;
553 
554 	return modify_user_hw_breakpoint(bp, &attr);
555 }
556 
557 /*
558  * Handle ptrace writes to debug register 7.
559  */
560 static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
561 {
562 	struct thread_struct *thread = &tsk->thread;
563 	unsigned long old_dr7;
564 	bool second_pass = false;
565 	int i, rc, ret = 0;
566 
567 	data &= ~DR_CONTROL_RESERVED;
568 	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
569 
570 restore:
571 	rc = 0;
572 	for (i = 0; i < HBP_NUM; i++) {
573 		unsigned len, type;
574 		bool disabled = !decode_dr7(data, i, &len, &type);
575 		struct perf_event *bp = thread->ptrace_bps[i];
576 
577 		if (!bp) {
578 			if (disabled)
579 				continue;
580 
581 			bp = ptrace_register_breakpoint(tsk,
582 					len, type, 0, disabled);
583 			if (IS_ERR(bp)) {
584 				rc = PTR_ERR(bp);
585 				break;
586 			}
587 
588 			thread->ptrace_bps[i] = bp;
589 			continue;
590 		}
591 
592 		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
593 		if (rc)
594 			break;
595 	}
596 
597 	/* Restore if the first pass failed, second_pass shouldn't fail. */
598 	if (rc && !WARN_ON(second_pass)) {
599 		ret = rc;
600 		data = old_dr7;
601 		second_pass = true;
602 		goto restore;
603 	}
604 
605 	return ret;
606 }
607 
608 /*
609  * Handle PTRACE_PEEKUSR calls for the debug register area.
610  */
611 static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
612 {
613 	struct thread_struct *thread = &tsk->thread;
614 	unsigned long val = 0;
615 
616 	if (n < HBP_NUM) {
617 		int index = array_index_nospec(n, HBP_NUM);
618 		struct perf_event *bp = thread->ptrace_bps[index];
619 
620 		if (bp)
621 			val = bp->hw.info.address;
622 	} else if (n == 6) {
623 		val = thread->virtual_dr6 ^ DR6_RESERVED; /* Flip back to arch polarity */
624 	} else if (n == 7) {
625 		val = thread->ptrace_dr7;
626 	}
627 	return val;
628 }
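/*
 * Note on the XOR with DR6_RESERVED: some DR6 bits have an inverted
 * (active-low) sense architecturally, so the kernel keeps
 * thread->virtual_dr6 entirely in positive polarity and translates to
 * the architectural encoding here on read, and back again on write in
 * ptrace_set_debugreg() below.
 */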
629 
630 static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
631 				      unsigned long addr)
632 {
633 	struct thread_struct *t = &tsk->thread;
634 	struct perf_event *bp = t->ptrace_bps[nr];
635 	int err = 0;
636 
637 	if (!bp) {
638 		/*
639 		 * Put stub len and type to create an inactive but correct bp.
640 		 *
641 		 * CHECKME: the previous code returned -EIO if the addr wasn't
642 		 * a valid task virtual addr. The new one will return -EINVAL in
643 		 *  this case.
644 		 * -EINVAL may be what we want for in-kernel breakpoints users,
645 		 * but -EIO looks better for ptrace, since we refuse a register
646 		 * writing for the user. And anyway this is the previous
647 		 * behaviour.
648 		 */
649 		bp = ptrace_register_breakpoint(tsk,
650 				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
651 				addr, true);
652 		if (IS_ERR(bp))
653 			err = PTR_ERR(bp);
654 		else
655 			t->ptrace_bps[nr] = bp;
656 	} else {
657 		struct perf_event_attr attr = bp->attr;
658 
659 		attr.bp_addr = addr;
660 		err = modify_user_hw_breakpoint(bp, &attr);
661 	}
662 
663 	return err;
664 }
665 
666 /*
667  * Handle PTRACE_POKEUSR calls for the debug register area.
668  */
669 static int ptrace_set_debugreg(struct task_struct *tsk, int n,
670 			       unsigned long val)
671 {
672 	struct thread_struct *thread = &tsk->thread;
673 	/* There are no DR4 or DR5 registers */
674 	int rc = -EIO;
675 
676 	if (n < HBP_NUM) {
677 		rc = ptrace_set_breakpoint_addr(tsk, n, val);
678 	} else if (n == 6) {
679 		thread->virtual_dr6 = val ^ DR6_RESERVED; /* Flip to positive polarity */
680 		rc = 0;
681 	} else if (n == 7) {
682 		rc = ptrace_write_dr7(tsk, val);
683 		if (!rc)
684 			thread->ptrace_dr7 = val;
685 	}
686 	return rc;
687 }
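/*
 * Note that n == 4 and n == 5 match none of the branches above and keep
 * the initial -EIO: DR4/DR5 are not independent registers (they merely
 * alias DR6/DR7 when CR4.DE is clear), so ptrace refuses to touch them.
 */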
688 
689 /*
690  * These access the current or another (stopped) task's io permission
691  * bitmap for debugging or core dump.
692  */
693 static int ioperm_active(struct task_struct *target,
694 			 const struct user_regset *regset)
695 {
696 	struct io_bitmap *iobm = target->thread.io_bitmap;
697 
698 	return iobm ? DIV_ROUND_UP(iobm->max, regset->size) : 0;
699 }
700 
701 static int ioperm_get(struct task_struct *target,
702 		      const struct user_regset *regset,
703 		      struct membuf to)
704 {
705 	struct io_bitmap *iobm = target->thread.io_bitmap;
706 
707 	if (!iobm)
708 		return -ENXIO;
709 
710 	return membuf_write(&to, iobm->bitmap, IO_BITMAP_BYTES);
711 }
712 
713 /*
714  * Called by kernel/ptrace.c when detaching..
715  *
716  * Make sure the single step bit is not set.
717  */
718 void ptrace_disable(struct task_struct *child)
719 {
720 	user_disable_single_step(child);
721 }
722 
723 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
724 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
725 #endif
726 #ifdef CONFIG_X86_64
727 static const struct user_regset_view user_x86_64_view; /* Initialized below. */
728 #endif
729 
730 long arch_ptrace(struct task_struct *child, long request,
731 		 unsigned long addr, unsigned long data)
732 {
733 	int ret;
734 	unsigned long __user *datap = (unsigned long __user *)data;
735 
736 #ifdef CONFIG_X86_64
737 	/* This is native 64-bit ptrace() */
738 	const struct user_regset_view *regset_view = &user_x86_64_view;
739 #else
740 	/* This is native 32-bit ptrace() */
741 	const struct user_regset_view *regset_view = &user_x86_32_view;
742 #endif
743 
744 	switch (request) {
745 	/* read the word at location addr in the USER area. */
746 	case PTRACE_PEEKUSR: {
747 		unsigned long tmp;
748 
749 		ret = -EIO;
750 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
751 			break;
752 
753 		tmp = 0;  /* Default return condition */
754 		if (addr < sizeof(struct user_regs_struct))
755 			tmp = getreg(child, addr);
756 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
757 			 addr <= offsetof(struct user, u_debugreg[7])) {
758 			addr -= offsetof(struct user, u_debugreg[0]);
759 			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
760 		}
761 		ret = put_user(tmp, datap);
762 		break;
763 	}
764 
765 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
766 		ret = -EIO;
767 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
768 			break;
769 
770 		if (addr < sizeof(struct user_regs_struct))
771 			ret = putreg(child, addr, data);
772 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
773 			 addr <= offsetof(struct user, u_debugreg[7])) {
774 			addr -= offsetof(struct user, u_debugreg[0]);
775 			ret = ptrace_set_debugreg(child,
776 						  addr / sizeof(data), data);
777 		}
778 		break;
779 
780 	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
781 		return copy_regset_to_user(child,
782 					   regset_view,
783 					   REGSET_GENERAL,
784 					   0, sizeof(struct user_regs_struct),
785 					   datap);
786 
787 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
788 		return copy_regset_from_user(child,
789 					     regset_view,
790 					     REGSET_GENERAL,
791 					     0, sizeof(struct user_regs_struct),
792 					     datap);
793 
794 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
795 		return copy_regset_to_user(child,
796 					   regset_view,
797 					   REGSET_FP,
798 					   0, sizeof(struct user_i387_struct),
799 					   datap);
800 
801 	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
802 		return copy_regset_from_user(child,
803 					     regset_view,
804 					     REGSET_FP,
805 					     0, sizeof(struct user_i387_struct),
806 					     datap);
807 
808 #ifdef CONFIG_X86_32
809 	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
810 		return copy_regset_to_user(child, &user_x86_32_view,
811 					   REGSET32_XFP,
812 					   0, sizeof(struct user_fxsr_struct),
813 					   datap) ? -EIO : 0;
814 
815 	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
816 		return copy_regset_from_user(child, &user_x86_32_view,
817 					     REGSET32_XFP,
818 					     0, sizeof(struct user_fxsr_struct),
819 					     datap) ? -EIO : 0;
820 #endif
821 
822 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
823 	case PTRACE_GET_THREAD_AREA:
824 		if ((int) addr < 0)
825 			return -EIO;
826 		ret = do_get_thread_area(child, addr,
827 					(struct user_desc __user *)data);
828 		break;
829 
830 	case PTRACE_SET_THREAD_AREA:
831 		if ((int) addr < 0)
832 			return -EIO;
833 		ret = do_set_thread_area(child, addr,
834 					(struct user_desc __user *)data, 0);
835 		break;
836 #endif
837 
838 #ifdef CONFIG_X86_64
839 		/* normal 64bit interface to access TLS data.
840 		   Works just like arch_prctl, except that the arguments
841 		   are reversed. */
842 	case PTRACE_ARCH_PRCTL:
843 		ret = do_arch_prctl_64(child, data, addr);
844 		break;
845 #endif
846 
847 	default:
848 		ret = ptrace_request(child, request, addr, data);
849 		break;
850 	}
851 
852 	return ret;
853 }
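/*
 * A minimal userspace sketch of the PTRACE_PEEKUSR path handled above,
 * for illustration only.  It assumes glibc's <sys/user.h> layout on
 * x86-64 and a tracee that is already attached and stopped:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/user.h>
 *	#include <stddef.h>
 *	#include <errno.h>
 *
 *	static long read_tracee_ip(pid_t pid)
 *	{
 *		errno = 0;
 *		long ip = ptrace(PTRACE_PEEKUSER, pid,
 *				 offsetof(struct user, regs.rip), 0);
 *		return (ip == -1 && errno) ? -1 : ip;
 *	}
 *
 * The offset falls in the "addr < sizeof(struct user_regs_struct)"
 * branch above and is resolved by getreg().
 */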
854 
855 #ifdef CONFIG_IA32_EMULATION
856 
857 #include <linux/compat.h>
858 #include <linux/syscalls.h>
859 #include <asm/ia32.h>
860 #include <asm/user32.h>
861 
862 #define R32(l,q)							\
863 	case offsetof(struct user32, regs.l):				\
864 		regs->q = value; break
865 
866 #define SEG32(rs)							\
867 	case offsetof(struct user32, regs.rs):				\
868 		return set_segment_reg(child,				\
869 				       offsetof(struct user_regs_struct, rs), \
870 				       value);				\
871 		break
872 
873 static int putreg32(struct task_struct *child, unsigned regno, u32 value)
874 {
875 	struct pt_regs *regs = task_pt_regs(child);
876 	int ret;
877 
878 	switch (regno) {
879 
880 	SEG32(cs);
881 	SEG32(ds);
882 	SEG32(es);
883 
884 	/*
885 	 * A 32-bit ptracer on a 64-bit kernel expects that writing
886 	 * FS or GS will also update the base.  This is needed for
887 	 * operations like PTRACE_SETREGS to fully restore a saved
888 	 * CPU state.
889 	 */
890 
891 	case offsetof(struct user32, regs.fs):
892 		ret = set_segment_reg(child,
893 				      offsetof(struct user_regs_struct, fs),
894 				      value);
895 		if (ret == 0)
896 			child->thread.fsbase =
897 				x86_fsgsbase_read_task(child, value);
898 		return ret;
899 
900 	case offsetof(struct user32, regs.gs):
901 		ret = set_segment_reg(child,
902 				      offsetof(struct user_regs_struct, gs),
903 				      value);
904 		if (ret == 0)
905 			child->thread.gsbase =
906 				x86_fsgsbase_read_task(child, value);
907 		return ret;
908 
909 	SEG32(ss);
910 
911 	R32(ebx, bx);
912 	R32(ecx, cx);
913 	R32(edx, dx);
914 	R32(edi, di);
915 	R32(esi, si);
916 	R32(ebp, bp);
917 	R32(eax, ax);
918 	R32(eip, ip);
919 	R32(esp, sp);
920 
921 	case offsetof(struct user32, regs.orig_eax):
922 		/*
923 		 * Warning: bizarre corner case fixup here.  A 32-bit
924 		 * debugger setting orig_eax to -1 wants to disable
925 		 * syscall restart.  Make sure that the syscall
926 		 * restart code sign-extends orig_ax.  Also make sure
927 		 * we interpret the -ERESTART* codes correctly if
928 		 * loaded into regs->ax in case the task is not
929 		 * actually still sitting at the exit from a 32-bit
930 		 * syscall with TS_COMPAT still set.
931 		 */
932 		regs->orig_ax = value;
933 		if (syscall_get_nr(child, regs) != -1)
934 			child->thread_info.status |= TS_I386_REGS_POKED;
935 		break;
936 
937 	case offsetof(struct user32, regs.eflags):
938 		return set_flags(child, value);
939 
940 	case offsetof(struct user32, u_debugreg[0]) ...
941 		offsetof(struct user32, u_debugreg[7]):
942 		regno -= offsetof(struct user32, u_debugreg[0]);
943 		return ptrace_set_debugreg(child, regno / 4, value);
944 
945 	default:
946 		if (regno > sizeof(struct user32) || (regno & 3))
947 			return -EIO;
948 
949 		/*
950 		 * Other dummy fields in the virtual user structure
951 		 * are ignored
952 		 */
953 		break;
954 	}
955 	return 0;
956 }
957 
958 #undef R32
959 #undef SEG32
960 
961 #define R32(l,q)							\
962 	case offsetof(struct user32, regs.l):				\
963 		*val = regs->q; break
964 
965 #define SEG32(rs)							\
966 	case offsetof(struct user32, regs.rs):				\
967 		*val = get_segment_reg(child,				\
968 				       offsetof(struct user_regs_struct, rs)); \
969 		break
970 
971 static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
972 {
973 	struct pt_regs *regs = task_pt_regs(child);
974 
975 	switch (regno) {
976 
977 	SEG32(ds);
978 	SEG32(es);
979 	SEG32(fs);
980 	SEG32(gs);
981 
982 	R32(cs, cs);
983 	R32(ss, ss);
984 	R32(ebx, bx);
985 	R32(ecx, cx);
986 	R32(edx, dx);
987 	R32(edi, di);
988 	R32(esi, si);
989 	R32(ebp, bp);
990 	R32(eax, ax);
991 	R32(orig_eax, orig_ax);
992 	R32(eip, ip);
993 	R32(esp, sp);
994 
995 	case offsetof(struct user32, regs.eflags):
996 		*val = get_flags(child);
997 		break;
998 
999 	case offsetof(struct user32, u_debugreg[0]) ...
1000 		offsetof(struct user32, u_debugreg[7]):
1001 		regno -= offsetof(struct user32, u_debugreg[0]);
1002 		*val = ptrace_get_debugreg(child, regno / 4);
1003 		break;
1004 
1005 	default:
1006 		if (regno > sizeof(struct user32) || (regno & 3))
1007 			return -EIO;
1008 
1009 		/*
1010 		 * Other dummy fields in the virtual user structure
1011 		 * are ignored
1012 		 */
1013 		*val = 0;
1014 		break;
1015 	}
1016 	return 0;
1017 }
1018 
1019 #undef R32
1020 #undef SEG32
1021 
1022 static int genregs32_get(struct task_struct *target,
1023 			 const struct user_regset *regset,
1024 			 struct membuf to)
1025 {
1026 	int reg;
1027 
1028 	for (reg = 0; to.left; reg++) {
1029 		u32 val;
1030 		getreg32(target, reg * 4, &val);
1031 		membuf_store(&to, val);
1032 	}
1033 	return 0;
1034 }
1035 
1036 static int genregs32_set(struct task_struct *target,
1037 			 const struct user_regset *regset,
1038 			 unsigned int pos, unsigned int count,
1039 			 const void *kbuf, const void __user *ubuf)
1040 {
1041 	int ret = 0;
1042 	if (kbuf) {
1043 		const compat_ulong_t *k = kbuf;
1044 		while (count >= sizeof(*k) && !ret) {
1045 			ret = putreg32(target, pos, *k++);
1046 			count -= sizeof(*k);
1047 			pos += sizeof(*k);
1048 		}
1049 	} else {
1050 		const compat_ulong_t __user *u = ubuf;
1051 		while (count >= sizeof(*u) && !ret) {
1052 			compat_ulong_t word;
1053 			ret = __get_user(word, u++);
1054 			if (ret)
1055 				break;
1056 			ret = putreg32(target, pos, word);
1057 			count -= sizeof(*u);
1058 			pos += sizeof(*u);
1059 		}
1060 	}
1061 	return ret;
1062 }
1063 
1064 static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
1065 			     compat_ulong_t caddr, compat_ulong_t cdata)
1066 {
1067 	unsigned long addr = caddr;
1068 	unsigned long data = cdata;
1069 	void __user *datap = compat_ptr(data);
1070 	int ret;
1071 	__u32 val;
1072 
1073 	switch (request) {
1074 	case PTRACE_PEEKUSR:
1075 		ret = getreg32(child, addr, &val);
1076 		if (ret == 0)
1077 			ret = put_user(val, (__u32 __user *)datap);
1078 		break;
1079 
1080 	case PTRACE_POKEUSR:
1081 		ret = putreg32(child, addr, data);
1082 		break;
1083 
1084 	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
1085 		return copy_regset_to_user(child, &user_x86_32_view,
1086 					   REGSET_GENERAL,
1087 					   0, sizeof(struct user_regs_struct32),
1088 					   datap);
1089 
1090 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
1091 		return copy_regset_from_user(child, &user_x86_32_view,
1092 					     REGSET_GENERAL, 0,
1093 					     sizeof(struct user_regs_struct32),
1094 					     datap);
1095 
1096 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
1097 		return copy_regset_to_user(child, &user_x86_32_view,
1098 					   REGSET_FP, 0,
1099 					   sizeof(struct user_i387_ia32_struct),
1100 					   datap);
1101 
1102 	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
1103 		return copy_regset_from_user(
1104 			child, &user_x86_32_view, REGSET_FP,
1105 			0, sizeof(struct user_i387_ia32_struct), datap);
1106 
1107 	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
1108 		return copy_regset_to_user(child, &user_x86_32_view,
1109 					   REGSET32_XFP, 0,
1110 					   sizeof(struct user32_fxsr_struct),
1111 					   datap);
1112 
1113 	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
1114 		return copy_regset_from_user(child, &user_x86_32_view,
1115 					     REGSET32_XFP, 0,
1116 					     sizeof(struct user32_fxsr_struct),
1117 					     datap);
1118 
1119 	case PTRACE_GET_THREAD_AREA:
1120 	case PTRACE_SET_THREAD_AREA:
1121 		return arch_ptrace(child, request, addr, data);
1122 
1123 	default:
1124 		return compat_ptrace_request(child, request, addr, data);
1125 	}
1126 
1127 	return ret;
1128 }
1129 #endif /* CONFIG_IA32_EMULATION */
1130 
1131 #ifdef CONFIG_X86_X32_ABI
1132 static long x32_arch_ptrace(struct task_struct *child,
1133 			    compat_long_t request, compat_ulong_t caddr,
1134 			    compat_ulong_t cdata)
1135 {
1136 	unsigned long addr = caddr;
1137 	unsigned long data = cdata;
1138 	void __user *datap = compat_ptr(data);
1139 	int ret;
1140 
1141 	switch (request) {
1142 	/* Read 32 bits at location addr in the USER area.  Only the lower
1143 	   32 bits of segment and debug registers are returned.  */
1144 	case PTRACE_PEEKUSR: {
1145 		u32 tmp;
1146 
1147 		ret = -EIO;
1148 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1149 		    addr < offsetof(struct user_regs_struct, cs))
1150 			break;
1151 
1152 		tmp = 0;  /* Default return condition */
1153 		if (addr < sizeof(struct user_regs_struct))
1154 			tmp = getreg(child, addr);
1155 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1156 			 addr <= offsetof(struct user, u_debugreg[7])) {
1157 			addr -= offsetof(struct user, u_debugreg[0]);
1158 			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
1159 		}
1160 		ret = put_user(tmp, (__u32 __user *)datap);
1161 		break;
1162 	}
1163 
1164 	/* Write the word at location addr in the USER area.  Only segment
1165 	   and debug registers may be updated; the 32-bit value is
1166 	   zero-extended. */
1167 	case PTRACE_POKEUSR:
1168 		ret = -EIO;
1169 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1170 		    addr < offsetof(struct user_regs_struct, cs))
1171 			break;
1172 
1173 		if (addr < sizeof(struct user_regs_struct))
1174 			ret = putreg(child, addr, data);
1175 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1176 			 addr <= offsetof(struct user, u_debugreg[7])) {
1177 			addr -= offsetof(struct user, u_debugreg[0]);
1178 			ret = ptrace_set_debugreg(child,
1179 						  addr / sizeof(data), data);
1180 		}
1181 		break;
1182 
1183 	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
1184 		return copy_regset_to_user(child,
1185 					   &user_x86_64_view,
1186 					   REGSET_GENERAL,
1187 					   0, sizeof(struct user_regs_struct),
1188 					   datap);
1189 
1190 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
1191 		return copy_regset_from_user(child,
1192 					     &user_x86_64_view,
1193 					     REGSET_GENERAL,
1194 					     0, sizeof(struct user_regs_struct),
1195 					     datap);
1196 
1197 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
1198 		return copy_regset_to_user(child,
1199 					   &user_x86_64_view,
1200 					   REGSET_FP,
1201 					   0, sizeof(struct user_i387_struct),
1202 					   datap);
1203 
1204 	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
1205 		return copy_regset_from_user(child,
1206 					     &user_x86_64_view,
1207 					     REGSET_FP,
1208 					     0, sizeof(struct user_i387_struct),
1209 					     datap);
1210 
1211 	default:
1212 		return compat_ptrace_request(child, request, addr, data);
1213 	}
1214 
1215 	return ret;
1216 }
1217 #endif
1218 
1219 #ifdef CONFIG_COMPAT
1220 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1221 			compat_ulong_t caddr, compat_ulong_t cdata)
1222 {
1223 #ifdef CONFIG_X86_X32_ABI
1224 	if (!in_ia32_syscall())
1225 		return x32_arch_ptrace(child, request, caddr, cdata);
1226 #endif
1227 #ifdef CONFIG_IA32_EMULATION
1228 	return ia32_arch_ptrace(child, request, caddr, cdata);
1229 #else
1230 	return 0;
1231 #endif
1232 }
1233 #endif	/* CONFIG_COMPAT */
1234 
1235 #ifdef CONFIG_X86_64
1236 
1237 static struct user_regset x86_64_regsets[] __ro_after_init = {
1238 	[REGSET64_GENERAL] = {
1239 		.core_note_type	= NT_PRSTATUS,
1240 		.n		= sizeof(struct user_regs_struct) / sizeof(long),
1241 		.size		= sizeof(long),
1242 		.align		= sizeof(long),
1243 		.regset_get	= genregs_get,
1244 		.set		= genregs_set
1245 	},
1246 	[REGSET64_FP] = {
1247 		.core_note_type	= NT_PRFPREG,
1248 		.n		= sizeof(struct fxregs_state) / sizeof(long),
1249 		.size		= sizeof(long),
1250 		.align		= sizeof(long),
1251 		.active		= regset_xregset_fpregs_active,
1252 		.regset_get	= xfpregs_get,
1253 		.set		= xfpregs_set
1254 	},
1255 	[REGSET64_XSTATE] = {
1256 		.core_note_type	= NT_X86_XSTATE,
1257 		.size		= sizeof(u64),
1258 		.align		= sizeof(u64),
1259 		.active		= xstateregs_active,
1260 		.regset_get	= xstateregs_get,
1261 		.set		= xstateregs_set
1262 	},
1263 	[REGSET64_IOPERM] = {
1264 		.core_note_type	= NT_386_IOPERM,
1265 		.n		= IO_BITMAP_LONGS,
1266 		.size		= sizeof(long),
1267 		.align		= sizeof(long),
1268 		.active		= ioperm_active,
1269 		.regset_get	= ioperm_get
1270 	},
1271 #ifdef CONFIG_X86_USER_SHADOW_STACK
1272 	[REGSET64_SSP] = {
1273 		.core_note_type	= NT_X86_SHSTK,
1274 		.n		= 1,
1275 		.size		= sizeof(u64),
1276 		.align		= sizeof(u64),
1277 		.active		= ssp_active,
1278 		.regset_get	= ssp_get,
1279 		.set		= ssp_set
1280 	},
1281 #endif
1282 };
1283 
1284 static const struct user_regset_view user_x86_64_view = {
1285 	.name = "x86_64", .e_machine = EM_X86_64,
1286 	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
1287 };
1288 
1289 #else  /* CONFIG_X86_32 */
1290 
1291 #define user_regs_struct32	user_regs_struct
1292 #define genregs32_get		genregs_get
1293 #define genregs32_set		genregs_set
1294 
1295 #endif	/* CONFIG_X86_64 */
1296 
1297 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1298 static struct user_regset x86_32_regsets[] __ro_after_init = {
1299 	[REGSET32_GENERAL] = {
1300 		.core_note_type	= NT_PRSTATUS,
1301 		.n		= sizeof(struct user_regs_struct32) / sizeof(u32),
1302 		.size		= sizeof(u32),
1303 		.align		= sizeof(u32),
1304 		.regset_get	= genregs32_get,
1305 		.set		= genregs32_set
1306 	},
1307 	[REGSET32_FP] = {
1308 		.core_note_type	= NT_PRFPREG,
1309 		.n		= sizeof(struct user_i387_ia32_struct) / sizeof(u32),
1310 		.size		= sizeof(u32),
1311 		.align		= sizeof(u32),
1312 		.active		= regset_fpregs_active,
1313 		.regset_get	= fpregs_get,
1314 		.set		= fpregs_set
1315 	},
1316 	[REGSET32_XFP] = {
1317 		.core_note_type	= NT_PRXFPREG,
1318 		.n		= sizeof(struct fxregs_state) / sizeof(u32),
1319 		.size		= sizeof(u32),
1320 		.align		= sizeof(u32),
1321 		.active		= regset_xregset_fpregs_active,
1322 		.regset_get	= xfpregs_get,
1323 		.set		= xfpregs_set
1324 	},
1325 	[REGSET32_XSTATE] = {
1326 		.core_note_type	= NT_X86_XSTATE,
1327 		.size		= sizeof(u64),
1328 		.align		= sizeof(u64),
1329 		.active		= xstateregs_active,
1330 		.regset_get	= xstateregs_get,
1331 		.set		= xstateregs_set
1332 	},
1333 	[REGSET32_TLS] = {
1334 		.core_note_type	= NT_386_TLS,
1335 		.n		= GDT_ENTRY_TLS_ENTRIES,
1336 		.bias		= GDT_ENTRY_TLS_MIN,
1337 		.size		= sizeof(struct user_desc),
1338 		.align		= sizeof(struct user_desc),
1339 		.active		= regset_tls_active,
1340 		.regset_get	= regset_tls_get,
1341 		.set		= regset_tls_set
1342 	},
1343 	[REGSET32_IOPERM] = {
1344 		.core_note_type	= NT_386_IOPERM,
1345 		.n		= IO_BITMAP_BYTES / sizeof(u32),
1346 		.size		= sizeof(u32),
1347 		.align		= sizeof(u32),
1348 		.active		= ioperm_active,
1349 		.regset_get	= ioperm_get
1350 	},
1351 };
1352 
1353 static const struct user_regset_view user_x86_32_view = {
1354 	.name = "i386", .e_machine = EM_386,
1355 	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
1356 };
1357 #endif
1358 
1359 /*
1360  * This represents bytes 464..511 in the memory layout exported through
1361  * the REGSET_XSTATE interface.
1362  */
1363 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
1364 
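/*
 * update_regset_xstate_info() below is called from the FPU init code once
 * the final xsave buffer size and feature mask are known: it sizes the
 * dynamic XSTATE regsets accordingly and records the mask in the FX
 * software-reserved words exported to userspace (xstate_fx_sw_bytes).
 */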
1365 void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
1366 {
1367 #ifdef CONFIG_X86_64
1368 	x86_64_regsets[REGSET64_XSTATE].n = size / sizeof(u64);
1369 #endif
1370 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1371 	x86_32_regsets[REGSET32_XSTATE].n = size / sizeof(u64);
1372 #endif
1373 	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
1374 }
1375 
1376 /*
1377  * This is used by the core dump code to decide which regset to dump.  The
1378  * core dump code writes out the resulting .e_machine and the corresponding
1379  * regsets.  This is suboptimal if the task is messing around with its CS.L
1380  * field, but at worst the core dump will end up missing some information.
1381  *
1382  * Unfortunately, it is also used by the broken PTRACE_GETREGSET and
1383  * PTRACE_SETREGSET APIs.  These APIs look at the .regsets field but have
1384  * no way to make sure that the e_machine they use matches the caller's
1385  * expectations.  The result is that the data format returned by
1386  * PTRACE_GETREGSET depends on the returned CS field (and even the offset
1387  * of the returned CS field depends on its value!) and the data format
1388  * accepted by PTRACE_SETREGSET is determined by the old CS value.  The
1389  * upshot is that it is basically impossible to use these APIs correctly.
1390  *
1391  * The best way to fix it in the long run would probably be to add new
1392  * improved ptrace() APIs to read and write registers reliably, possibly by
1393  * allowing userspace to select the ELF e_machine variant that they expect.
1394  */
1395 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1396 {
1397 #ifdef CONFIG_IA32_EMULATION
1398 	if (!user_64bit_mode(task_pt_regs(task)))
1399 #endif
1400 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1401 		return &user_x86_32_view;
1402 #endif
1403 #ifdef CONFIG_X86_64
1404 	return &user_x86_64_view;
1405 #endif
1406 }
1407 
1408 void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
1409 {
1410 	struct task_struct *tsk = current;
1411 
1412 	tsk->thread.trap_nr = X86_TRAP_DB;
1413 	tsk->thread.error_code = error_code;
1414 
1415 	/* Send us the fake SIGTRAP */
1416 	force_sig_fault(SIGTRAP, si_code,
1417 			user_mode(regs) ? (void __user *)regs->ip : NULL);
1418 }
1419 
1420 void user_single_step_report(struct pt_regs *regs)
1421 {
1422 	send_sigtrap(regs, 0, TRAP_BRKPT);
1423 }
1424