// SPDX-License-Identifier: GPL-2.0-only
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/syscall.h>
#include <asm/fsgsbase.h>
#include <asm/io_bitmap.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_XSTATE,
	REGSET_TLS,
	REGSET_IOPERM32,
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}
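/*
 * Illustrative example: the two lookups above are inverses of each other
 * and are typically used by tracing code such as kprobe event argument
 * parsing.  With the table above, roughly:
 *
 *	regs_query_register_offset("ip") == offsetof(struct pt_regs, ip)
 *	regs_query_register_name(offsetof(struct pt_regs, ip)) == "ip"
 *
 * while an unknown name yields -EINVAL and an unknown offset yields NULL.
 */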
/*
 * Does not yet catch signals sent when the child dies
 * (in exit.c or in signal.c).
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
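/*
 * Illustrative examples: a selector is accepted here if it is null or if
 * its requested privilege level is USER_RPL (3).  So 0 is allowed, a
 * typical user selector such as 0x2b (RPL 3) is allowed even if it does
 * not index a live descriptor, while a kernel selector such as 0x10
 * (RPL 0) is refused.  Null %cs/%ss are policed separately in
 * set_segment_reg() below.
 */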
#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		/* Else, fall through */

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct, cs):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->cs = value;
		break;
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->ss = value;
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had set it.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
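/*
 * Illustrative example: FLAG_MASK limits what a flags write can change.
 * If a debugger pokes EFLAGS with, say, IF or IOPL bits set, those bits
 * are silently dropped and the task's existing values are kept, while the
 * arithmetic flags (CF, ZF, ...) and TF do get written.  TF is further
 * filtered through TIF_FORCED_TF above so that a kernel-initiated single
 * step is neither clobbered by nor leaked to the tracer.
 */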
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		/*
		 * When changing the FS base, use do_arch_prctl_64()
		 * to set the index to zero and to set the base
		 * as requested.
		 */
		if (child->thread.fsbase != value)
			return do_arch_prctl_64(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		if (child->thread.gsbase != value)
			return do_arch_prctl_64(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		return x86_fsbase_read_task(task);
	case offsetof(struct user_regs_struct, gs_base):
		return x86_gsbase_read_task(task);
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}
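/*
 * Illustrative example: getreg()/putreg() take the offset of a member of
 * the kernel's struct user_regs_struct, which is also what a debugger
 * passes as 'addr' to PTRACE_PEEKUSER/PTRACE_POKEUSER (struct user starts
 * with the register block).  A peek of the instruction-pointer slot,
 * offsetof(struct user_regs_struct, ip), reaches arch_ptrace() below,
 * passes the addr < sizeof(struct user_regs_struct) check and comes back
 * via getreg(child, offset) as the value saved in the child's pt_regs.
 */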
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count >= sizeof(*k)) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count >= sizeof(*u)) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->debugreg6 |= (DR_TRAP0 << i);
}

/*
 * Walk through every ptrace breakpoint for this thread and
 * build the dr7 value on top of their attributes.
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}

static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
				 int len, int type, bool disabled)
{
	int err, bp_len, bp_type;

	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
	if (!err) {
		attr->bp_len = bp_len;
		attr->bp_type = bp_type;
		attr->disabled = disabled;
	}

	return err;
}

static struct perf_event *
ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
			   unsigned long addr, bool disabled)
{
	struct perf_event_attr attr;
	int err;

	ptrace_breakpoint_init(&attr);
	attr.bp_addr = addr;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return ERR_PTR(err);

	return register_user_hw_breakpoint(&attr, ptrace_triggered,
					   NULL, tsk);
}

static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
				    int disabled)
{
	struct perf_event_attr attr = bp->attr;
	int err;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long old_dr7;
	bool second_pass = false;
	int i, rc, ret = 0;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);

restore:
	rc = 0;
	for (i = 0; i < HBP_NUM; i++) {
		unsigned len, type;
		bool disabled = !decode_dr7(data, i, &len, &type);
		struct perf_event *bp = thread->ptrace_bps[i];

		if (!bp) {
			if (disabled)
				continue;

			bp = ptrace_register_breakpoint(tsk,
					len, type, 0, disabled);
			if (IS_ERR(bp)) {
				rc = PTR_ERR(bp);
				break;
			}

			thread->ptrace_bps[i] = bp;
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
		if (rc)
			break;
	}

	/* Restore if the first pass failed, second_pass shouldn't fail. */
	if (rc && !WARN_ON(second_pass)) {
		ret = rc;
		data = old_dr7;
		second_pass = true;
		goto restore;
	}

	return ret;
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long val = 0;

	if (n < HBP_NUM) {
		int index = array_index_nospec(n, HBP_NUM);
		struct perf_event *bp = thread->ptrace_bps[index];

		if (bp)
			val = bp->hw.info.address;
	} else if (n == 6) {
		val = thread->debugreg6;
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}

static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct thread_struct *t = &tsk->thread;
	struct perf_event *bp = t->ptrace_bps[nr];
	int err = 0;

	if (!bp) {
		/*
		 * Put stub len and type to create an inactive but correct bp.
		 *
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 * this case.
		 * -EINVAL may be what we want for in-kernel breakpoint users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * write for the user. And anyway this is the previous
		 * behaviour.
		 */
		bp = ptrace_register_breakpoint(tsk,
				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
				addr, true);
		if (IS_ERR(bp))
			err = PTR_ERR(bp);
		else
			t->ptrace_bps[nr] = bp;
	} else {
		struct perf_event_attr attr = bp->attr;

		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

	return err;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &tsk->thread;
	/* There are no DR4 or DR5 registers */
	int rc = -EIO;

	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
	} else if (n == 6) {
		thread->debugreg6 = val;
		rc = 0;
	} else if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}
	return rc;
}
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	struct io_bitmap *iobm = target->thread.io_bitmap;

	return iobm ? DIV_ROUND_UP(iobm->max, regset->size) : 0;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	struct io_bitmap *iobm = target->thread.io_bitmap;

	if (!iobm)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   iobm->bitmap, 0, IO_BITMAP_BYTES);
}
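/*
 * Illustrative note: this pair backs the NT_386_IOPERM regset, so the I/O
 * permission bitmap of a task that used ioperm() shows up in core dumps
 * and can be fetched by a debugger; a task that never touched ioperm()
 * has no io_bitmap, so ioperm_active() reports zero and ioperm_get()
 * returns -ENXIO.
 */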
/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl_64(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Warning: bizarre corner case fixup here.  A 32-bit
		 * debugger setting orig_eax to -1 wants to disable
		 * syscall restart.  Make sure that the syscall
		 * restart code sign-extends orig_ax.  Also make sure
		 * we interpret the -ERESTART* codes correctly if
		 * loaded into regs->ax in case the task is not
		 * actually still sitting at the exit from a 32-bit
		 * syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			child->thread_info.status |= TS_I386_REGS_POKED;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}
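/*
 * Illustrative expansion: the R32()/SEG32() helpers above only generate
 * case labels.  For instance
 *
 *	R32(ebx, bx);
 *
 * expands to roughly
 *
 *	case offsetof(struct user32, regs.ebx):
 *		regs->q... i.e. regs->bx = value; break;
 *
 * so a write to the 32-bit view's ebx slot lands, zero-extended, in the
 * 64-bit task's bx slot in pt_regs.
 */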
#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k)) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u)) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
			     compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_X32_ABI
static long x32_arch_ptrace(struct task_struct *child,
			    compat_long_t request, compat_ulong_t caddr,
			    compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	/* Read 32bits at location addr in the USER area.  Only allow
	   to return the lower 32bits of segment and debug registers.  */
	case PTRACE_PEEKUSR: {
		u32 tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, (__u32 __user *)datap);
		break;
	}

	/* Write the word at location addr in the USER area.  Only allow
	   to update segment and debug registers with the upper 32bits
	   zero-extended. */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
#ifdef CONFIG_X86_X32_ABI
	if (!in_ia32_syscall())
		return x32_arch_ptrace(child, request, caddr, cdata);
#endif
#ifdef CONFIG_IA32_EMULATION
	return ia32_arch_ptrace(child, request, caddr, cdata);
#else
	return 0;
#endif
}
#endif	/* CONFIG_COMPAT */
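/*
 * Illustrative note: compat_arch_ptrace() is the entry point for 32-bit
 * tracers on a 64-bit kernel.  A classic ia32 tracer issues ptrace()
 * through the compat syscall path, so in_ia32_syscall() is true and the
 * request is handled by ia32_arch_ptrace() above; an x32 tracer is not in
 * an ia32 syscall and is routed to x32_arch_ptrace() instead.
 */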
#ifdef CONFIG_X86_64

static struct user_regset x86_64_regsets[] __ro_after_init = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __ro_after_init = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif
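/*
 * Illustrative example: the regset views above also serve the generic
 * PTRACE_GETREGSET/PTRACE_SETREGSET requests and the matching core-dump
 * notes.  A 64-bit debugger can read the general registers with roughly
 *
 *	struct user_regs_struct gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * which is routed through task_user_regset_view() below and ends up in
 * genregs_get().
 */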
/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (!user_64bit_mode(task_pt_regs(task)))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
{
	struct task_struct *tsk = current;

	tsk->thread.trap_nr = X86_TRAP_DB;
	tsk->thread.error_code = error_code;

	/* Send us the fake SIGTRAP */
	force_sig_fault(SIGTRAP, si_code,
			user_mode(regs) ? (void __user *)regs->ip : NULL);
}

void user_single_step_report(struct pt_regs *regs)
{
	send_sigtrap(regs, 0, TRAP_BRKPT);
}
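/*
 * Illustrative note: user_single_step_report() reports a completed
 * user-mode single step as a SIGTRAP with si_code TRAP_BRKPT; the "fake"
 * siginfo built in send_sigtrap() carries the user instruction pointer,
 * which is how a tracer waiting in waitpid() tells a step or breakpoint
 * trap apart from an ordinary signal.
 */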