1 // SPDX-License-Identifier: GPL-2.0-only 2 /* By Ross Biro 1/23/92 */ 3 /* 4 * Pentium III FXSR, SSE support 5 * Gareth Hughes <gareth@valinux.com>, May 2000 6 */ 7 8 #include <linux/kernel.h> 9 #include <linux/sched.h> 10 #include <linux/sched/task_stack.h> 11 #include <linux/mm.h> 12 #include <linux/smp.h> 13 #include <linux/errno.h> 14 #include <linux/slab.h> 15 #include <linux/ptrace.h> 16 #include <linux/tracehook.h> 17 #include <linux/user.h> 18 #include <linux/elf.h> 19 #include <linux/security.h> 20 #include <linux/audit.h> 21 #include <linux/seccomp.h> 22 #include <linux/signal.h> 23 #include <linux/perf_event.h> 24 #include <linux/hw_breakpoint.h> 25 #include <linux/rcupdate.h> 26 #include <linux/export.h> 27 #include <linux/context_tracking.h> 28 29 #include <linux/uaccess.h> 30 #include <asm/pgtable.h> 31 #include <asm/processor.h> 32 #include <asm/fpu/internal.h> 33 #include <asm/fpu/signal.h> 34 #include <asm/fpu/regset.h> 35 #include <asm/debugreg.h> 36 #include <asm/ldt.h> 37 #include <asm/desc.h> 38 #include <asm/prctl.h> 39 #include <asm/proto.h> 40 #include <asm/hw_breakpoint.h> 41 #include <asm/traps.h> 42 #include <asm/syscall.h> 43 #include <asm/fsgsbase.h> 44 45 #include "tls.h" 46 47 enum x86_regset { 48 REGSET_GENERAL, 49 REGSET_FP, 50 REGSET_XFP, 51 REGSET_IOPERM64 = REGSET_XFP, 52 REGSET_XSTATE, 53 REGSET_TLS, 54 REGSET_IOPERM32, 55 }; 56 57 struct pt_regs_offset { 58 const char *name; 59 int offset; 60 }; 61 62 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} 63 #define REG_OFFSET_END {.name = NULL, .offset = 0} 64 65 static const struct pt_regs_offset regoffset_table[] = { 66 #ifdef CONFIG_X86_64 67 REG_OFFSET_NAME(r15), 68 REG_OFFSET_NAME(r14), 69 REG_OFFSET_NAME(r13), 70 REG_OFFSET_NAME(r12), 71 REG_OFFSET_NAME(r11), 72 REG_OFFSET_NAME(r10), 73 REG_OFFSET_NAME(r9), 74 REG_OFFSET_NAME(r8), 75 #endif 76 REG_OFFSET_NAME(bx), 77 REG_OFFSET_NAME(cx), 78 REG_OFFSET_NAME(dx), 79 REG_OFFSET_NAME(si), 80 
REG_OFFSET_NAME(di), 81 REG_OFFSET_NAME(bp), 82 REG_OFFSET_NAME(ax), 83 #ifdef CONFIG_X86_32 84 REG_OFFSET_NAME(ds), 85 REG_OFFSET_NAME(es), 86 REG_OFFSET_NAME(fs), 87 REG_OFFSET_NAME(gs), 88 #endif 89 REG_OFFSET_NAME(orig_ax), 90 REG_OFFSET_NAME(ip), 91 REG_OFFSET_NAME(cs), 92 REG_OFFSET_NAME(flags), 93 REG_OFFSET_NAME(sp), 94 REG_OFFSET_NAME(ss), 95 REG_OFFSET_END, 96 }; 97 98 /** 99 * regs_query_register_offset() - query register offset from its name 100 * @name: the name of a register 101 * 102 * regs_query_register_offset() returns the offset of a register in struct 103 * pt_regs from its name. If the name is invalid, this returns -EINVAL; 104 */ 105 int regs_query_register_offset(const char *name) 106 { 107 const struct pt_regs_offset *roff; 108 for (roff = regoffset_table; roff->name != NULL; roff++) 109 if (!strcmp(roff->name, name)) 110 return roff->offset; 111 return -EINVAL; 112 } 113 114 /** 115 * regs_query_register_name() - query register name from its offset 116 * @offset: the offset of a register in struct pt_regs. 117 * 118 * regs_query_register_name() returns the name of a register from its 119 * offset in struct pt_regs. If the @offset is invalid, this returns NULL; 120 */ 121 const char *regs_query_register_name(unsigned int offset) 122 { 123 const struct pt_regs_offset *roff; 124 for (roff = regoffset_table; roff->name != NULL; roff++) 125 if (roff->offset == offset) 126 return roff->name; 127 return NULL; 128 } 129 130 /* 131 * does not yet catch signals sent when the child dies. 132 * in exit.c or in signal.c. 133 */ 134 135 /* 136 * Determines which flags the user has access to [1 = access, 0 = no access]. 137 */ 138 #define FLAG_MASK_32 ((unsigned long) \ 139 (X86_EFLAGS_CF | X86_EFLAGS_PF | \ 140 X86_EFLAGS_AF | X86_EFLAGS_ZF | \ 141 X86_EFLAGS_SF | X86_EFLAGS_TF | \ 142 X86_EFLAGS_DF | X86_EFLAGS_OF | \ 143 X86_EFLAGS_RF | X86_EFLAGS_AC)) 144 145 /* 146 * Determines whether a value may be installed in a segment register. 
 */
static inline bool invalid_selector(u16 value)
{
	/* A selector is acceptable if it is null or carries the user RPL. */
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps. The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
 *
 * Now, if the stack is empty, '&regs->sp' is out of range. In this
 * case we try to take the previous stack. To always return a non-null
 * stack pointer we fall back to regs as stack if no previous stack
 * exists.
 *
 * This is valid only for kernel mode traps.
 */
unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	/* Base of the THREAD_SIZE-aligned stack that contains @regs. */
	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
	unsigned long sp = (unsigned long)&regs->sp;
	u32 *prev_esp;

	/* &regs->sp lies within the same stack: it is the live sp. */
	if (context == (sp & ~(THREAD_SIZE - 1)))
		return sp;

	/* Otherwise the previous stack pointer is stashed at the stack base. */
	prev_esp = (u32 *)(context);
	if (*prev_esp)
		return (unsigned long)*prev_esp;

	/* Last resort: use @regs itself so we never return NULL. */
	return (unsigned long)regs;
}
EXPORT_SYMBOL_GPL(kernel_stack_pointer);

/* Translate a user_regs_struct byte offset into a pt_regs word slot. */
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

/* Read one segment register of @task; gs lives outside pt_regs on 32-bit. */
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

/* Validate and install one segment register of @task (32-bit variant). */
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead. Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		/* fall through: cs/ss are stored in pt_regs like the rest */

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		/* gs is kept outside pt_regs on 32-bit; see get_segment_reg(). */
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

#else /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

/* Translate a user_regs_struct byte offset into a pt_regs word slot. */
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

/* Read one segment register of @task (64-bit variant). */
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		/* cs/ss are saved in pt_regs; read them below. */
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

/* Validate and install one segment register of @task (64-bit variant). */
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

	/*
	 * Can't actually change these in 64-bit mode.
	 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->cs = value;
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->ss = value;
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

/* Read the task's eflags, hiding a TF bit the debugger forced on. */
static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

/* Install user-supplied eflags; bits outside FLAG_MASK are preserved. */
static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	/* Only the FLAG_MASK bits are user-writable. */
	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

/*
 * Write one register of @child, addressed by its byte offset in
 * user_regs_struct.  Segment registers, eflags and the 64-bit fs/gs
 * bases get dedicated validation; all other slots are stored directly
 * into pt_regs.  Returns 0 or -EIO on an invalid value.
 */
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		/* The base must be a canonical user-space address. */
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		x86_fsbase_write_task(child, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		x86_gsbase_write_task(child, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

/* Read one register of @task, addressed by user_regs_struct byte offset. */
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		return x86_fsbase_read_task(task);
	case offsetof(struct user_regs_struct, gs_base):
		return x86_gsbase_read_task(task);
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

/* regset get: copy the general registers out, one word at a time. */
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count >= sizeof(*k)) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count >= sizeof(*u)) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

/* regset set: install the general registers, stopping on the first error. */
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

/* perf callback invoked when a ptrace hardware breakpoint fires. */
static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 *
	 * NOTE(review): if @bp is somehow not in ptrace_bps[], the loop
	 * exits with i == HBP_NUM and a bit above DR_TRAP3 is set below.
	 * Presumably every triggering bp is registered here -- confirm.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->debugreg6 |= (DR_TRAP0 << i);
}

/*
 * Walk through every ptrace breakpoints for this thread and
 * build the dr7 value on top of their attributes.
 *
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		/* Only enabled slots contribute to the synthesized dr7. */
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}

/* Convert DR7 len/type encodings into perf_event_attr breakpoint fields. */
static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
					int len, int type, bool disabled)
{
	int err, bp_len, bp_type;

	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
	if (!err) {
		attr->bp_len = bp_len;
		attr->bp_type = bp_type;
		attr->disabled = disabled;
	}

	return err;
}

/* Create a user hardware breakpoint for @tsk; returns ERR_PTR() on failure. */
static struct perf_event *
ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
				unsigned long addr, bool disabled)
{
	struct perf_event_attr attr;
	int err;

	ptrace_breakpoint_init(&attr);
	attr.bp_addr = addr;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return ERR_PTR(err);

	return register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
}

/* Re-arm an existing breakpoint with new len/type/disabled state. */
static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
					int disabled)
{
	struct perf_event_attr attr = bp->attr;
	int err;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long old_dr7;
	bool second_pass = false;
	int i, rc, ret = 0;

	data &= ~DR_CONTROL_RESERVED;
	/* Snapshot the current dr7 so a failed update can be rolled back. */
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);

restore:
	rc = 0;
	for (i = 0; i < HBP_NUM; i++) {
		unsigned len, type;
		bool disabled = !decode_dr7(data, i, &len, &type);
		struct perf_event *bp = thread->ptrace_bps[i];

		if (!bp) {
			if (disabled)
				continue;

			/* Lazily allocate the slot on first enable. */
			bp = ptrace_register_breakpoint(tsk,
					len, type, 0, disabled);
			if (IS_ERR(bp)) {
				rc = PTR_ERR(bp);
				break;
			}

			thread->ptrace_bps[i] = bp;
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
		if (rc)
			break;
	}

	/* Restore if the first pass failed, second_pass shouldn't fail. */
	if (rc && !WARN_ON(second_pass)) {
		ret = rc;
		data = old_dr7;
		second_pass = true;
		goto restore;
	}

	return ret;
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long val = 0;

	if (n < HBP_NUM) {
		struct perf_event *bp = thread->ptrace_bps[n];

		/* Unallocated breakpoint slots read back as 0. */
		if (bp)
			val = bp->hw.info.address;
	} else if (n == 6) {
		/* Virtual DR6, maintained by ptrace_triggered(). */
		val = thread->debugreg6;
	} else if (n == 7) {
		/* The last raw value the tracer wrote, not the live dr7. */
		val = thread->ptrace_dr7;
	}
	/* DR4/DR5 and out-of-range indices fall through and read as 0. */
	return val;
}

/* Set the address of breakpoint slot @nr, creating it disabled if absent. */
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
					unsigned long addr)
{
	struct thread_struct *t = &tsk->thread;
	struct perf_event *bp = t->ptrace_bps[nr];
	int err = 0;

	if (!bp) {
		/*
		 * Put stub len and type to create an inactive but correct bp.
		 *
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 * this case.
		 * -EINVAL may be what we want for in-kernel breakpoints users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user. And anyway this is the previous
		 * behaviour.
		 */
		bp = ptrace_register_breakpoint(tsk,
				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
				addr, true);
		if (IS_ERR(bp))
			err = PTR_ERR(bp);
		else
			t->ptrace_bps[nr] = bp;
	} else {
		/* Slot exists: just move the breakpoint to the new address. */
		struct perf_event_attr attr = bp->attr;

		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

	return err;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &tsk->thread;
	/* There are no DR4 or DR5 registers */
	int rc = -EIO;

	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
	} else if (n == 6) {
		thread->debugreg6 = val;
		rc = 0;
	} else if (n == 7) {
		/* Remember the raw value only if installing it succeeded. */
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}
	return rc;
}

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	/* Number of regset units covered by the task's io bitmap. */
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

/* Arch-specific ptrace dispatcher; unhandled requests go to ptrace_request(). */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		/* Reject unaligned or out-of-range offsets. */
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					(struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					(struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl_64(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

/* Store @value into pt_regs member @q for the user32 offset of regs.l. */
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

/* Dispatch a user32 segment-register write to set_segment_reg(). */
#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

/* Write one 32-bit register slot of a compat (ia32) child. */
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Warning: bizarre corner case fixup here. A 32-bit
		 * debugger setting orig_eax to -1 wants to disable
		 * syscall restart. Make sure that the syscall
		 * restart code sign-extends orig_ax. Also make sure
		 * we interpret the -ERESTART* codes correctly if
		 * loaded into regs->ax in case the task is not
		 * actually still sitting at the exit from a 32-bit
		 * syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			child->thread_info.status |= TS_I386_REGS_POKED;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		/*
		 * NOTE(review): '>' permits regno == sizeof(struct user32);
		 * harmless because the value is ignored below, but '>='
		 * would state the bound more precisely -- confirm.
		 */
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

/* Read pt_regs member @q into *val for the user32 offset of regs.l. */
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

/* Read a segment register via get_segment_reg() for user32 offset regs.rs. */
#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

/* Read one 32-bit register slot of a compat (ia32) child into *val. */
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

/* Compat regset get: copy the 32-bit general registers out. */
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k)) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u)) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

/* Compat regset set: install 32-bit general registers, stopping on error. */
static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

/* ptrace entry point for ia32 tracees on a 64-bit kernel. */
static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
			     compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	/* TLS area requests share the native 64-bit implementation. */
	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif /* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_X32_ABI
/*
 * ptrace entry point for x32 tracers: 32-bit addresses/data, but the
 * tracee's full 64-bit register image (struct user / user_regs_struct).
 */
static long x32_arch_ptrace(struct task_struct *child,
			    compat_long_t request, compat_ulong_t caddr,
			    compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	/* Read 32bits at location addr in the USER area. Only allow
	   to return the lower 32bits of segment and debug registers. */
	case PTRACE_PEEKUSR: {
		u32 tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, (__u32 __user *)datap);
		break;
	}

	/* Write the word at location addr in the USER area. Only allow
	   to update segment and debug registers with the upper 32bits
	   zero-extended. */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
/*
 * Top-level compat ptrace dispatcher: tracers not in an ia32 syscall
 * (i.e. x32) go to x32_arch_ptrace(), ia32 tracers to ia32_arch_ptrace().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
#ifdef CONFIG_X86_X32_ABI
	if (!in_ia32_syscall())
		return x32_arch_ptrace(child, request, caddr, cdata);
#endif
#ifdef CONFIG_IA32_EMULATION
	return ia32_arch_ptrace(child, request, caddr, cdata);
#else
	return 0;
#endif
}
#endif	/* CONFIG_COMPAT */

#ifdef CONFIG_X86_64

/*
 * Native 64-bit regsets.  REGSET_XSTATE's .n is left zero here and is
 * filled in at boot by update_regset_xstate_info() once the xstate size
 * is known.
 */
static struct user_regset x86_64_regsets[] __ro_after_init = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

/* On a native 32-bit kernel the "32" variants are just the native ones. */
#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
 * 32-bit regsets (native 32-bit kernel or ia32 emulation).  As above,
 * REGSET_XSTATE's .n is filled in by update_regset_xstate_info().
 */
static struct user_regset x86_32_regsets[] __ro_after_init = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

/*
 * Called once at boot: record the real xstate buffer size in the regset
 * tables and stash the xstate feature mask for the exported image.
 */
void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}

/*
 * Pick the regset view matching @task's execution mode.  Note the
 * deliberate ifdef interplay: under IA32_EMULATION the "if" guards the
 * 32-bit return; on a pure 32-bit kernel that same return statement is
 * unconditional, and a pure 64-bit kernel falls through to the 64-bit
 * view.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (!user_64bit_mode(task_pt_regs(task)))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

/*
 * Record debug-trap bookkeeping (trap_nr/error_code) in @tsk and queue a
 * SIGTRAP with @si_code; si_addr is the trapping IP for user-mode regs,
 * NULL otherwise.
 */
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	tsk->thread.trap_nr = X86_TRAP_DB;
	tsk->thread.error_code = error_code;

	/* Send us the fake SIGTRAP */
	force_sig_fault(SIGTRAP, si_code,
			user_mode(regs) ? (void __user *)regs->ip : NULL, tsk);
}

/* Deliver the single-step SIGTRAP for the current task. */
void user_single_step_report(struct pt_regs *regs)
{
	send_sigtrap(current, regs, 0, TRAP_BRKPT);
}