1 /* By Ross Biro 1/23/92 */ 2 /* 3 * Pentium III FXSR, SSE support 4 * Gareth Hughes <gareth@valinux.com>, May 2000 5 * 6 * BTS tracing 7 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007 8 */ 9 10 #include <linux/kernel.h> 11 #include <linux/sched.h> 12 #include <linux/mm.h> 13 #include <linux/smp.h> 14 #include <linux/errno.h> 15 #include <linux/ptrace.h> 16 #include <linux/regset.h> 17 #include <linux/tracehook.h> 18 #include <linux/user.h> 19 #include <linux/elf.h> 20 #include <linux/security.h> 21 #include <linux/audit.h> 22 #include <linux/seccomp.h> 23 #include <linux/signal.h> 24 #include <linux/workqueue.h> 25 #include <linux/perf_event.h> 26 #include <linux/hw_breakpoint.h> 27 28 #include <asm/uaccess.h> 29 #include <asm/pgtable.h> 30 #include <asm/system.h> 31 #include <asm/processor.h> 32 #include <asm/i387.h> 33 #include <asm/debugreg.h> 34 #include <asm/ldt.h> 35 #include <asm/desc.h> 36 #include <asm/prctl.h> 37 #include <asm/proto.h> 38 #include <asm/ds.h> 39 #include <asm/hw_breakpoint.h> 40 41 #include "tls.h" 42 43 #define CREATE_TRACE_POINTS 44 #include <trace/events/syscalls.h> 45 46 enum x86_regset { 47 REGSET_GENERAL, 48 REGSET_FP, 49 REGSET_XFP, 50 REGSET_IOPERM64 = REGSET_XFP, 51 REGSET_TLS, 52 REGSET_IOPERM32, 53 }; 54 55 struct pt_regs_offset { 56 const char *name; 57 int offset; 58 }; 59 60 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} 61 #define REG_OFFSET_END {.name = NULL, .offset = 0} 62 63 static const struct pt_regs_offset regoffset_table[] = { 64 #ifdef CONFIG_X86_64 65 REG_OFFSET_NAME(r15), 66 REG_OFFSET_NAME(r14), 67 REG_OFFSET_NAME(r13), 68 REG_OFFSET_NAME(r12), 69 REG_OFFSET_NAME(r11), 70 REG_OFFSET_NAME(r10), 71 REG_OFFSET_NAME(r9), 72 REG_OFFSET_NAME(r8), 73 #endif 74 REG_OFFSET_NAME(bx), 75 REG_OFFSET_NAME(cx), 76 REG_OFFSET_NAME(dx), 77 REG_OFFSET_NAME(si), 78 REG_OFFSET_NAME(di), 79 REG_OFFSET_NAME(bp), 80 REG_OFFSET_NAME(ax), 81 #ifdef CONFIG_X86_32 82 REG_OFFSET_NAME(ds), 83 REG_OFFSET_NAME(es), 84 REG_OFFSET_NAME(fs), 85 REG_OFFSET_NAME(gs), 86 #endif 87 REG_OFFSET_NAME(orig_ax), 88 REG_OFFSET_NAME(ip), 89 REG_OFFSET_NAME(cs), 90 REG_OFFSET_NAME(flags), 91 REG_OFFSET_NAME(sp), 92 REG_OFFSET_NAME(ss), 93 REG_OFFSET_END, 94 }; 95 96 /** 97 * regs_query_register_offset() - query register offset from its name 98 * @name: the name of a register 99 * 100 * regs_query_register_offset() returns the offset of a register in struct 101 * pt_regs from its name. If the name is invalid, this returns -EINVAL; 102 */ 103 int regs_query_register_offset(const char *name) 104 { 105 const struct pt_regs_offset *roff; 106 for (roff = regoffset_table; roff->name != NULL; roff++) 107 if (!strcmp(roff->name, name)) 108 return roff->offset; 109 return -EINVAL; 110 } 111 112 /** 113 * regs_query_register_name() - query register name from its offset 114 * @offset: the offset of a register in struct pt_regs. 115 * 116 * regs_query_register_name() returns the name of a register from its 117 * offset in struct pt_regs. 
 * If the @offset is invalid, this returns NULL;
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

static const int arg_offs_table[] = {
#ifdef CONFIG_X86_32
	[0] = offsetof(struct pt_regs, ax),
	[1] = offsetof(struct pt_regs, dx),
	[2] = offsetof(struct pt_regs, cx)
#else /* CONFIG_X86_64 */
	[0] = offsetof(struct pt_regs, di),
	[1] = offsetof(struct pt_regs, si),
	[2] = offsetof(struct pt_regs, dx),
	[3] = offsetof(struct pt_regs, cx),
	[4] = offsetof(struct pt_regs, r8),
	[5] = offsetof(struct pt_regs, r9)
#endif
};

/**
 * regs_get_argument_nth() - get Nth argument at function call
 * @regs:	pt_regs which contains registers at function entry.
 * @n:		argument number.
 *
 * regs_get_argument_nth() returns @n th argument of a function call.
 * Since usually the kernel stack will be changed right after function entry,
 * you must use this at function entry. If the @n th entry is NOT in the
 * kernel stack or pt_regs, this returns 0.
 */
unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n)
{
	if (n < ARRAY_SIZE(arg_offs_table))
		return *(unsigned long *)((char *)regs + arg_offs_table[n]);
	else {
		/*
		 * The typical case: arg n is on the stack.
		 * (Note: stack[0] = return address, so skip it)
		 */
		n -= ARRAY_SIZE(arg_offs_table);
		return regs_get_kernel_stack_nth(regs, 1 + n);
	}
}

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead. Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

#else /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
334 */ 335 if ((value == GS_TLS_SEL && task->thread.gsindex == 0 && 336 task->thread.gs != 0) || 337 (value == 0 && task->thread.gsindex == GS_TLS_SEL && 338 task->thread.gs == 0)) 339 break; 340 task->thread.gsindex = value; 341 if (task == current) 342 load_gs_index(task->thread.gsindex); 343 break; 344 case offsetof(struct user_regs_struct,ds): 345 task->thread.ds = value; 346 if (task == current) 347 loadsegment(ds, task->thread.ds); 348 break; 349 case offsetof(struct user_regs_struct,es): 350 task->thread.es = value; 351 if (task == current) 352 loadsegment(es, task->thread.es); 353 break; 354 355 /* 356 * Can't actually change these in 64-bit mode. 357 */ 358 case offsetof(struct user_regs_struct,cs): 359 if (unlikely(value == 0)) 360 return -EIO; 361 #ifdef CONFIG_IA32_EMULATION 362 if (test_tsk_thread_flag(task, TIF_IA32)) 363 task_pt_regs(task)->cs = value; 364 #endif 365 break; 366 case offsetof(struct user_regs_struct,ss): 367 if (unlikely(value == 0)) 368 return -EIO; 369 #ifdef CONFIG_IA32_EMULATION 370 if (test_tsk_thread_flag(task, TIF_IA32)) 371 task_pt_regs(task)->ss = value; 372 #endif 373 break; 374 } 375 376 return 0; 377 } 378 379 #endif /* CONFIG_X86_32 */ 380 381 static unsigned long get_flags(struct task_struct *task) 382 { 383 unsigned long retval = task_pt_regs(task)->flags; 384 385 /* 386 * If the debugger set TF, hide it from the readout. 387 */ 388 if (test_tsk_thread_flag(task, TIF_FORCED_TF)) 389 retval &= ~X86_EFLAGS_TF; 390 391 return retval; 392 } 393 394 static int set_flags(struct task_struct *task, unsigned long value) 395 { 396 struct pt_regs *regs = task_pt_regs(task); 397 398 /* 399 * If the user value contains TF, mark that 400 * it was not "us" (the debugger) that set it. 401 * If not, make sure it stays set if we had. 402 */ 403 if (value & X86_EFLAGS_TF) 404 clear_tsk_thread_flag(task, TIF_FORCED_TF); 405 else if (test_tsk_thread_flag(task, TIF_FORCED_TF)) 406 value |= X86_EFLAGS_TF; 407 408 regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK); 409 410 return 0; 411 } 412 413 static int putreg(struct task_struct *child, 414 unsigned long offset, unsigned long value) 415 { 416 switch (offset) { 417 case offsetof(struct user_regs_struct, cs): 418 case offsetof(struct user_regs_struct, ds): 419 case offsetof(struct user_regs_struct, es): 420 case offsetof(struct user_regs_struct, fs): 421 case offsetof(struct user_regs_struct, gs): 422 case offsetof(struct user_regs_struct, ss): 423 return set_segment_reg(child, offset, value); 424 425 case offsetof(struct user_regs_struct, flags): 426 return set_flags(child, value); 427 428 #ifdef CONFIG_X86_64 429 case offsetof(struct user_regs_struct,fs_base): 430 if (value >= TASK_SIZE_OF(child)) 431 return -EIO; 432 /* 433 * When changing the segment base, use do_arch_prctl 434 * to set either thread.fs or thread.fsindex and the 435 * corresponding GDT slot. 436 */ 437 if (child->thread.fs != value) 438 return do_arch_prctl(child, ARCH_SET_FS, value); 439 return 0; 440 case offsetof(struct user_regs_struct,gs_base): 441 /* 442 * Exactly the same here as the %fs handling above. 
443 */ 444 if (value >= TASK_SIZE_OF(child)) 445 return -EIO; 446 if (child->thread.gs != value) 447 return do_arch_prctl(child, ARCH_SET_GS, value); 448 return 0; 449 #endif 450 } 451 452 *pt_regs_access(task_pt_regs(child), offset) = value; 453 return 0; 454 } 455 456 static unsigned long getreg(struct task_struct *task, unsigned long offset) 457 { 458 switch (offset) { 459 case offsetof(struct user_regs_struct, cs): 460 case offsetof(struct user_regs_struct, ds): 461 case offsetof(struct user_regs_struct, es): 462 case offsetof(struct user_regs_struct, fs): 463 case offsetof(struct user_regs_struct, gs): 464 case offsetof(struct user_regs_struct, ss): 465 return get_segment_reg(task, offset); 466 467 case offsetof(struct user_regs_struct, flags): 468 return get_flags(task); 469 470 #ifdef CONFIG_X86_64 471 case offsetof(struct user_regs_struct, fs_base): { 472 /* 473 * do_arch_prctl may have used a GDT slot instead of 474 * the MSR. To userland, it appears the same either 475 * way, except the %fs segment selector might not be 0. 476 */ 477 unsigned int seg = task->thread.fsindex; 478 if (task->thread.fs != 0) 479 return task->thread.fs; 480 if (task == current) 481 asm("movl %%fs,%0" : "=r" (seg)); 482 if (seg != FS_TLS_SEL) 483 return 0; 484 return get_desc_base(&task->thread.tls_array[FS_TLS]); 485 } 486 case offsetof(struct user_regs_struct, gs_base): { 487 /* 488 * Exactly the same here as the %fs handling above. 489 */ 490 unsigned int seg = task->thread.gsindex; 491 if (task->thread.gs != 0) 492 return task->thread.gs; 493 if (task == current) 494 asm("movl %%gs,%0" : "=r" (seg)); 495 if (seg != GS_TLS_SEL) 496 return 0; 497 return get_desc_base(&task->thread.tls_array[GS_TLS]); 498 } 499 #endif 500 } 501 502 return *pt_regs_access(task_pt_regs(task), offset); 503 } 504 505 static int genregs_get(struct task_struct *target, 506 const struct user_regset *regset, 507 unsigned int pos, unsigned int count, 508 void *kbuf, void __user *ubuf) 509 { 510 if (kbuf) { 511 unsigned long *k = kbuf; 512 while (count >= sizeof(*k)) { 513 *k++ = getreg(target, pos); 514 count -= sizeof(*k); 515 pos += sizeof(*k); 516 } 517 } else { 518 unsigned long __user *u = ubuf; 519 while (count >= sizeof(*u)) { 520 if (__put_user(getreg(target, pos), u++)) 521 return -EFAULT; 522 count -= sizeof(*u); 523 pos += sizeof(*u); 524 } 525 } 526 527 return 0; 528 } 529 530 static int genregs_set(struct task_struct *target, 531 const struct user_regset *regset, 532 unsigned int pos, unsigned int count, 533 const void *kbuf, const void __user *ubuf) 534 { 535 int ret = 0; 536 if (kbuf) { 537 const unsigned long *k = kbuf; 538 while (count >= sizeof(*k) && !ret) { 539 ret = putreg(target, pos, *k++); 540 count -= sizeof(*k); 541 pos += sizeof(*k); 542 } 543 } else { 544 const unsigned long __user *u = ubuf; 545 while (count >= sizeof(*u) && !ret) { 546 unsigned long word; 547 ret = __get_user(word, u++); 548 if (ret) 549 break; 550 ret = putreg(target, pos, word); 551 count -= sizeof(*u); 552 pos += sizeof(*u); 553 } 554 } 555 return ret; 556 } 557 558 static void ptrace_triggered(struct perf_event *bp, int nmi, 559 struct perf_sample_data *data, 560 struct pt_regs *regs) 561 { 562 int i; 563 struct thread_struct *thread = &(current->thread); 564 565 /* 566 * Store in the virtual DR6 register the fact that the breakpoint 567 * was hit so the thread's debugger will see it. 
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->debugreg6 |= (DR_TRAP0 << i);
}

/*
 * Walk through every ptrace breakpoint for this thread and
 * build the dr7 value on top of their attributes.
 *
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}

static int
ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
			 struct task_struct *tsk, int disabled)
{
	int err;
	int gen_len, gen_type;
	struct perf_event_attr attr;

	/*
	 * We should have at least an inactive breakpoint at this
	 * slot. It means the user is writing dr7 without having
	 * written the address register first.
	 */
	if (!bp)
		return -EINVAL;

	err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
	if (err)
		return err;

	attr = bp->attr;
	attr.bp_len = gen_len;
	attr.bp_type = gen_type;
	attr.disabled = disabled;

	return modify_user_hw_breakpoint(bp, &attr);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long old_dr7;
	int i, orig_ret = 0, rc = 0;
	int enabled, second_pass = 0;
	unsigned len, type;
	struct perf_event *bp;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
restore:
	/*
	 * Loop through all the hardware breakpoints, making the
	 * appropriate changes to each.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		enabled = decode_dr7(data, i, &len, &type);
		bp = thread->ptrace_bps[i];

		if (!enabled) {
			if (bp) {
				/*
				 * Don't unregister the breakpoints right away,
				 * unless all register_user_hw_breakpoint()
				 * requests have succeeded. This prevents
				 * any window of opportunity for debug
				 * register grabbing by other users.
				 */
				if (!second_pass)
					continue;

				rc = ptrace_modify_breakpoint(bp, len, type,
							      tsk, 1);
				if (rc)
					break;
			}
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
		if (rc)
			break;
	}
	/*
	 * Make a second pass to free the remaining unused breakpoints
	 * or to restore the original breakpoints if an error occurred.
	 */
	if (!second_pass) {
		second_pass = 1;
		if (rc < 0) {
			orig_ret = rc;
			data = old_dr7;
		}
		goto restore;
	}
	return ((orig_ret < 0) ? orig_ret : rc);
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
690 */ 691 static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) 692 { 693 struct thread_struct *thread = &(tsk->thread); 694 unsigned long val = 0; 695 696 if (n < HBP_NUM) { 697 struct perf_event *bp; 698 bp = thread->ptrace_bps[n]; 699 if (!bp) 700 return 0; 701 val = bp->hw.info.address; 702 } else if (n == 6) { 703 val = thread->debugreg6; 704 } else if (n == 7) { 705 val = thread->ptrace_dr7; 706 } 707 return val; 708 } 709 710 static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, 711 unsigned long addr) 712 { 713 struct perf_event *bp; 714 struct thread_struct *t = &tsk->thread; 715 struct perf_event_attr attr; 716 717 if (!t->ptrace_bps[nr]) { 718 hw_breakpoint_init(&attr); 719 /* 720 * Put stub len and type to register (reserve) an inactive but 721 * correct bp 722 */ 723 attr.bp_addr = addr; 724 attr.bp_len = HW_BREAKPOINT_LEN_1; 725 attr.bp_type = HW_BREAKPOINT_W; 726 attr.disabled = 1; 727 728 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); 729 730 /* 731 * CHECKME: the previous code returned -EIO if the addr wasn't 732 * a valid task virtual addr. The new one will return -EINVAL in 733 * this case. 734 * -EINVAL may be what we want for in-kernel breakpoints users, 735 * but -EIO looks better for ptrace, since we refuse a register 736 * writing for the user. And anyway this is the previous 737 * behaviour. 738 */ 739 if (IS_ERR(bp)) 740 return PTR_ERR(bp); 741 742 t->ptrace_bps[nr] = bp; 743 } else { 744 int err; 745 746 bp = t->ptrace_bps[nr]; 747 748 attr = bp->attr; 749 attr.bp_addr = addr; 750 err = modify_user_hw_breakpoint(bp, &attr); 751 if (err) 752 return err; 753 } 754 755 756 return 0; 757 } 758 759 /* 760 * Handle PTRACE_POKEUSR calls for the debug register area. 761 */ 762 int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val) 763 { 764 struct thread_struct *thread = &(tsk->thread); 765 int rc = 0; 766 767 /* There are no DR4 or DR5 registers */ 768 if (n == 4 || n == 5) 769 return -EIO; 770 771 if (n == 6) { 772 thread->debugreg6 = val; 773 goto ret_path; 774 } 775 if (n < HBP_NUM) { 776 rc = ptrace_set_breakpoint_addr(tsk, n, val); 777 if (rc) 778 return rc; 779 } 780 /* All that's left is DR7 */ 781 if (n == 7) { 782 rc = ptrace_write_dr7(tsk, val); 783 if (!rc) 784 thread->ptrace_dr7 = val; 785 } 786 787 ret_path: 788 return rc; 789 } 790 791 /* 792 * These access the current or another (stopped) task's io permission 793 * bitmap for debugging or core dump. 794 */ 795 static int ioperm_active(struct task_struct *target, 796 const struct user_regset *regset) 797 { 798 return target->thread.io_bitmap_max / regset->size; 799 } 800 801 static int ioperm_get(struct task_struct *target, 802 const struct user_regset *regset, 803 unsigned int pos, unsigned int count, 804 void *kbuf, void __user *ubuf) 805 { 806 if (!target->thread.io_bitmap_ptr) 807 return -ENXIO; 808 809 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 810 target->thread.io_bitmap_ptr, 811 0, IO_BITMAP_BYTES); 812 } 813 814 #ifdef CONFIG_X86_PTRACE_BTS 815 /* 816 * A branch trace store context. 817 * 818 * Contexts may only be installed by ptrace_bts_config() and only for 819 * ptraced tasks. 820 * 821 * Contexts are destroyed when the tracee is detached from the tracer. 822 * The actual destruction work requires interrupts enabled, so the 823 * work is deferred and will be scheduled during __ptrace_unlink(). 
824 * 825 * Contexts hold an additional task_struct reference on the traced 826 * task, as well as a reference on the tracer's mm. 827 * 828 * Ptrace already holds a task_struct for the duration of ptrace operations, 829 * but since destruction is deferred, it may be executed after both 830 * tracer and tracee exited. 831 */ 832 struct bts_context { 833 /* The branch trace handle. */ 834 struct bts_tracer *tracer; 835 836 /* The buffer used to store the branch trace and its size. */ 837 void *buffer; 838 unsigned int size; 839 840 /* The mm that paid for the above buffer. */ 841 struct mm_struct *mm; 842 843 /* The task this context belongs to. */ 844 struct task_struct *task; 845 846 /* The signal to send on a bts buffer overflow. */ 847 unsigned int bts_ovfl_signal; 848 849 /* The work struct to destroy a context. */ 850 struct work_struct work; 851 }; 852 853 static int alloc_bts_buffer(struct bts_context *context, unsigned int size) 854 { 855 void *buffer = NULL; 856 int err = -ENOMEM; 857 858 err = account_locked_memory(current->mm, current->signal->rlim, size); 859 if (err < 0) 860 return err; 861 862 buffer = kzalloc(size, GFP_KERNEL); 863 if (!buffer) 864 goto out_refund; 865 866 context->buffer = buffer; 867 context->size = size; 868 context->mm = get_task_mm(current); 869 870 return 0; 871 872 out_refund: 873 refund_locked_memory(current->mm, size); 874 return err; 875 } 876 877 static inline void free_bts_buffer(struct bts_context *context) 878 { 879 if (!context->buffer) 880 return; 881 882 kfree(context->buffer); 883 context->buffer = NULL; 884 885 refund_locked_memory(context->mm, context->size); 886 context->size = 0; 887 888 mmput(context->mm); 889 context->mm = NULL; 890 } 891 892 static void free_bts_context_work(struct work_struct *w) 893 { 894 struct bts_context *context; 895 896 context = container_of(w, struct bts_context, work); 897 898 ds_release_bts(context->tracer); 899 put_task_struct(context->task); 900 free_bts_buffer(context); 901 kfree(context); 902 } 903 904 static inline void free_bts_context(struct bts_context *context) 905 { 906 INIT_WORK(&context->work, free_bts_context_work); 907 schedule_work(&context->work); 908 } 909 910 static inline struct bts_context *alloc_bts_context(struct task_struct *task) 911 { 912 struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL); 913 if (context) { 914 context->task = task; 915 task->bts = context; 916 917 get_task_struct(task); 918 } 919 920 return context; 921 } 922 923 static int ptrace_bts_read_record(struct task_struct *child, size_t index, 924 struct bts_struct __user *out) 925 { 926 struct bts_context *context; 927 const struct bts_trace *trace; 928 struct bts_struct bts; 929 const unsigned char *at; 930 int error; 931 932 context = child->bts; 933 if (!context) 934 return -ESRCH; 935 936 trace = ds_read_bts(context->tracer); 937 if (!trace) 938 return -ESRCH; 939 940 at = trace->ds.top - ((index + 1) * trace->ds.size); 941 if ((void *)at < trace->ds.begin) 942 at += (trace->ds.n * trace->ds.size); 943 944 if (!trace->read) 945 return -EOPNOTSUPP; 946 947 error = trace->read(context->tracer, at, &bts); 948 if (error < 0) 949 return error; 950 951 if (copy_to_user(out, &bts, sizeof(bts))) 952 return -EFAULT; 953 954 return sizeof(bts); 955 } 956 957 static int ptrace_bts_drain(struct task_struct *child, 958 long size, 959 struct bts_struct __user *out) 960 { 961 struct bts_context *context; 962 const struct bts_trace *trace; 963 const unsigned char *at; 964 int error, drained = 0; 965 966 context = 
child->bts; 967 if (!context) 968 return -ESRCH; 969 970 trace = ds_read_bts(context->tracer); 971 if (!trace) 972 return -ESRCH; 973 974 if (!trace->read) 975 return -EOPNOTSUPP; 976 977 if (size < (trace->ds.top - trace->ds.begin)) 978 return -EIO; 979 980 for (at = trace->ds.begin; (void *)at < trace->ds.top; 981 out++, drained++, at += trace->ds.size) { 982 struct bts_struct bts; 983 984 error = trace->read(context->tracer, at, &bts); 985 if (error < 0) 986 return error; 987 988 if (copy_to_user(out, &bts, sizeof(bts))) 989 return -EFAULT; 990 } 991 992 memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size); 993 994 error = ds_reset_bts(context->tracer); 995 if (error < 0) 996 return error; 997 998 return drained; 999 } 1000 1001 static int ptrace_bts_config(struct task_struct *child, 1002 long cfg_size, 1003 const struct ptrace_bts_config __user *ucfg) 1004 { 1005 struct bts_context *context; 1006 struct ptrace_bts_config cfg; 1007 unsigned int flags = 0; 1008 1009 if (cfg_size < sizeof(cfg)) 1010 return -EIO; 1011 1012 if (copy_from_user(&cfg, ucfg, sizeof(cfg))) 1013 return -EFAULT; 1014 1015 context = child->bts; 1016 if (!context) 1017 context = alloc_bts_context(child); 1018 if (!context) 1019 return -ENOMEM; 1020 1021 if (cfg.flags & PTRACE_BTS_O_SIGNAL) { 1022 if (!cfg.signal) 1023 return -EINVAL; 1024 1025 return -EOPNOTSUPP; 1026 context->bts_ovfl_signal = cfg.signal; 1027 } 1028 1029 ds_release_bts(context->tracer); 1030 context->tracer = NULL; 1031 1032 if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) { 1033 int err; 1034 1035 free_bts_buffer(context); 1036 if (!cfg.size) 1037 return 0; 1038 1039 err = alloc_bts_buffer(context, cfg.size); 1040 if (err < 0) 1041 return err; 1042 } 1043 1044 if (cfg.flags & PTRACE_BTS_O_TRACE) 1045 flags |= BTS_USER; 1046 1047 if (cfg.flags & PTRACE_BTS_O_SCHED) 1048 flags |= BTS_TIMESTAMPS; 1049 1050 context->tracer = 1051 ds_request_bts_task(child, context->buffer, context->size, 1052 NULL, (size_t)-1, flags); 1053 if (unlikely(IS_ERR(context->tracer))) { 1054 int error = PTR_ERR(context->tracer); 1055 1056 free_bts_buffer(context); 1057 context->tracer = NULL; 1058 return error; 1059 } 1060 1061 return sizeof(cfg); 1062 } 1063 1064 static int ptrace_bts_status(struct task_struct *child, 1065 long cfg_size, 1066 struct ptrace_bts_config __user *ucfg) 1067 { 1068 struct bts_context *context; 1069 const struct bts_trace *trace; 1070 struct ptrace_bts_config cfg; 1071 1072 context = child->bts; 1073 if (!context) 1074 return -ESRCH; 1075 1076 if (cfg_size < sizeof(cfg)) 1077 return -EIO; 1078 1079 trace = ds_read_bts(context->tracer); 1080 if (!trace) 1081 return -ESRCH; 1082 1083 memset(&cfg, 0, sizeof(cfg)); 1084 cfg.size = trace->ds.end - trace->ds.begin; 1085 cfg.signal = context->bts_ovfl_signal; 1086 cfg.bts_size = sizeof(struct bts_struct); 1087 1088 if (cfg.signal) 1089 cfg.flags |= PTRACE_BTS_O_SIGNAL; 1090 1091 if (trace->ds.flags & BTS_USER) 1092 cfg.flags |= PTRACE_BTS_O_TRACE; 1093 1094 if (trace->ds.flags & BTS_TIMESTAMPS) 1095 cfg.flags |= PTRACE_BTS_O_SCHED; 1096 1097 if (copy_to_user(ucfg, &cfg, sizeof(cfg))) 1098 return -EFAULT; 1099 1100 return sizeof(cfg); 1101 } 1102 1103 static int ptrace_bts_clear(struct task_struct *child) 1104 { 1105 struct bts_context *context; 1106 const struct bts_trace *trace; 1107 1108 context = child->bts; 1109 if (!context) 1110 return -ESRCH; 1111 1112 trace = ds_read_bts(context->tracer); 1113 if (!trace) 1114 return -ESRCH; 1115 1116 memset(trace->ds.begin, 0, 
trace->ds.n * trace->ds.size); 1117 1118 return ds_reset_bts(context->tracer); 1119 } 1120 1121 static int ptrace_bts_size(struct task_struct *child) 1122 { 1123 struct bts_context *context; 1124 const struct bts_trace *trace; 1125 1126 context = child->bts; 1127 if (!context) 1128 return -ESRCH; 1129 1130 trace = ds_read_bts(context->tracer); 1131 if (!trace) 1132 return -ESRCH; 1133 1134 return (trace->ds.top - trace->ds.begin) / trace->ds.size; 1135 } 1136 1137 /* 1138 * Called from __ptrace_unlink() after the child has been moved back 1139 * to its original parent. 1140 */ 1141 void ptrace_bts_untrace(struct task_struct *child) 1142 { 1143 if (unlikely(child->bts)) { 1144 free_bts_context(child->bts); 1145 child->bts = NULL; 1146 } 1147 } 1148 #endif /* CONFIG_X86_PTRACE_BTS */ 1149 1150 /* 1151 * Called by kernel/ptrace.c when detaching.. 1152 * 1153 * Make sure the single step bit is not set. 1154 */ 1155 void ptrace_disable(struct task_struct *child) 1156 { 1157 user_disable_single_step(child); 1158 #ifdef TIF_SYSCALL_EMU 1159 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); 1160 #endif 1161 } 1162 1163 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 1164 static const struct user_regset_view user_x86_32_view; /* Initialized below. */ 1165 #endif 1166 1167 long arch_ptrace(struct task_struct *child, long request, long addr, long data) 1168 { 1169 int ret; 1170 unsigned long __user *datap = (unsigned long __user *)data; 1171 1172 switch (request) { 1173 /* read the word at location addr in the USER area. */ 1174 case PTRACE_PEEKUSR: { 1175 unsigned long tmp; 1176 1177 ret = -EIO; 1178 if ((addr & (sizeof(data) - 1)) || addr < 0 || 1179 addr >= sizeof(struct user)) 1180 break; 1181 1182 tmp = 0; /* Default return condition */ 1183 if (addr < sizeof(struct user_regs_struct)) 1184 tmp = getreg(child, addr); 1185 else if (addr >= offsetof(struct user, u_debugreg[0]) && 1186 addr <= offsetof(struct user, u_debugreg[7])) { 1187 addr -= offsetof(struct user, u_debugreg[0]); 1188 tmp = ptrace_get_debugreg(child, addr / sizeof(data)); 1189 } 1190 ret = put_user(tmp, datap); 1191 break; 1192 } 1193 1194 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 1195 ret = -EIO; 1196 if ((addr & (sizeof(data) - 1)) || addr < 0 || 1197 addr >= sizeof(struct user)) 1198 break; 1199 1200 if (addr < sizeof(struct user_regs_struct)) 1201 ret = putreg(child, addr, data); 1202 else if (addr >= offsetof(struct user, u_debugreg[0]) && 1203 addr <= offsetof(struct user, u_debugreg[7])) { 1204 addr -= offsetof(struct user, u_debugreg[0]); 1205 ret = ptrace_set_debugreg(child, 1206 addr / sizeof(data), data); 1207 } 1208 break; 1209 1210 case PTRACE_GETREGS: /* Get all gp regs from the child. */ 1211 return copy_regset_to_user(child, 1212 task_user_regset_view(current), 1213 REGSET_GENERAL, 1214 0, sizeof(struct user_regs_struct), 1215 datap); 1216 1217 case PTRACE_SETREGS: /* Set all gp regs in the child. */ 1218 return copy_regset_from_user(child, 1219 task_user_regset_view(current), 1220 REGSET_GENERAL, 1221 0, sizeof(struct user_regs_struct), 1222 datap); 1223 1224 case PTRACE_GETFPREGS: /* Get the child FPU state. */ 1225 return copy_regset_to_user(child, 1226 task_user_regset_view(current), 1227 REGSET_FP, 1228 0, sizeof(struct user_i387_struct), 1229 datap); 1230 1231 case PTRACE_SETFPREGS: /* Set the child FPU state. 
*/ 1232 return copy_regset_from_user(child, 1233 task_user_regset_view(current), 1234 REGSET_FP, 1235 0, sizeof(struct user_i387_struct), 1236 datap); 1237 1238 #ifdef CONFIG_X86_32 1239 case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ 1240 return copy_regset_to_user(child, &user_x86_32_view, 1241 REGSET_XFP, 1242 0, sizeof(struct user_fxsr_struct), 1243 datap) ? -EIO : 0; 1244 1245 case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ 1246 return copy_regset_from_user(child, &user_x86_32_view, 1247 REGSET_XFP, 1248 0, sizeof(struct user_fxsr_struct), 1249 datap) ? -EIO : 0; 1250 #endif 1251 1252 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 1253 case PTRACE_GET_THREAD_AREA: 1254 if (addr < 0) 1255 return -EIO; 1256 ret = do_get_thread_area(child, addr, 1257 (struct user_desc __user *) data); 1258 break; 1259 1260 case PTRACE_SET_THREAD_AREA: 1261 if (addr < 0) 1262 return -EIO; 1263 ret = do_set_thread_area(child, addr, 1264 (struct user_desc __user *) data, 0); 1265 break; 1266 #endif 1267 1268 #ifdef CONFIG_X86_64 1269 /* normal 64bit interface to access TLS data. 1270 Works just like arch_prctl, except that the arguments 1271 are reversed. */ 1272 case PTRACE_ARCH_PRCTL: 1273 ret = do_arch_prctl(child, data, addr); 1274 break; 1275 #endif 1276 1277 /* 1278 * These bits need more cooking - not enabled yet: 1279 */ 1280 #ifdef CONFIG_X86_PTRACE_BTS 1281 case PTRACE_BTS_CONFIG: 1282 ret = ptrace_bts_config 1283 (child, data, (struct ptrace_bts_config __user *)addr); 1284 break; 1285 1286 case PTRACE_BTS_STATUS: 1287 ret = ptrace_bts_status 1288 (child, data, (struct ptrace_bts_config __user *)addr); 1289 break; 1290 1291 case PTRACE_BTS_SIZE: 1292 ret = ptrace_bts_size(child); 1293 break; 1294 1295 case PTRACE_BTS_GET: 1296 ret = ptrace_bts_read_record 1297 (child, data, (struct bts_struct __user *) addr); 1298 break; 1299 1300 case PTRACE_BTS_CLEAR: 1301 ret = ptrace_bts_clear(child); 1302 break; 1303 1304 case PTRACE_BTS_DRAIN: 1305 ret = ptrace_bts_drain 1306 (child, data, (struct bts_struct __user *) addr); 1307 break; 1308 #endif /* CONFIG_X86_PTRACE_BTS */ 1309 1310 default: 1311 ret = ptrace_request(child, request, addr, data); 1312 break; 1313 } 1314 1315 return ret; 1316 } 1317 1318 #ifdef CONFIG_IA32_EMULATION 1319 1320 #include <linux/compat.h> 1321 #include <linux/syscalls.h> 1322 #include <asm/ia32.h> 1323 #include <asm/user32.h> 1324 1325 #define R32(l,q) \ 1326 case offsetof(struct user32, regs.l): \ 1327 regs->q = value; break 1328 1329 #define SEG32(rs) \ 1330 case offsetof(struct user32, regs.rs): \ 1331 return set_segment_reg(child, \ 1332 offsetof(struct user_regs_struct, rs), \ 1333 value); \ 1334 break 1335 1336 static int putreg32(struct task_struct *child, unsigned regno, u32 value) 1337 { 1338 struct pt_regs *regs = task_pt_regs(child); 1339 1340 switch (regno) { 1341 1342 SEG32(cs); 1343 SEG32(ds); 1344 SEG32(es); 1345 SEG32(fs); 1346 SEG32(gs); 1347 SEG32(ss); 1348 1349 R32(ebx, bx); 1350 R32(ecx, cx); 1351 R32(edx, dx); 1352 R32(edi, di); 1353 R32(esi, si); 1354 R32(ebp, bp); 1355 R32(eax, ax); 1356 R32(eip, ip); 1357 R32(esp, sp); 1358 1359 case offsetof(struct user32, regs.orig_eax): 1360 /* 1361 * A 32-bit debugger setting orig_eax means to restore 1362 * the state of the task restarting a 32-bit syscall. 1363 * Make sure we interpret the -ERESTART* codes correctly 1364 * in case the task is not actually still sitting at the 1365 * exit from a 32-bit syscall with TS_COMPAT still set. 
1366 */ 1367 regs->orig_ax = value; 1368 if (syscall_get_nr(child, regs) >= 0) 1369 task_thread_info(child)->status |= TS_COMPAT; 1370 break; 1371 1372 case offsetof(struct user32, regs.eflags): 1373 return set_flags(child, value); 1374 1375 case offsetof(struct user32, u_debugreg[0]) ... 1376 offsetof(struct user32, u_debugreg[7]): 1377 regno -= offsetof(struct user32, u_debugreg[0]); 1378 return ptrace_set_debugreg(child, regno / 4, value); 1379 1380 default: 1381 if (regno > sizeof(struct user32) || (regno & 3)) 1382 return -EIO; 1383 1384 /* 1385 * Other dummy fields in the virtual user structure 1386 * are ignored 1387 */ 1388 break; 1389 } 1390 return 0; 1391 } 1392 1393 #undef R32 1394 #undef SEG32 1395 1396 #define R32(l,q) \ 1397 case offsetof(struct user32, regs.l): \ 1398 *val = regs->q; break 1399 1400 #define SEG32(rs) \ 1401 case offsetof(struct user32, regs.rs): \ 1402 *val = get_segment_reg(child, \ 1403 offsetof(struct user_regs_struct, rs)); \ 1404 break 1405 1406 static int getreg32(struct task_struct *child, unsigned regno, u32 *val) 1407 { 1408 struct pt_regs *regs = task_pt_regs(child); 1409 1410 switch (regno) { 1411 1412 SEG32(ds); 1413 SEG32(es); 1414 SEG32(fs); 1415 SEG32(gs); 1416 1417 R32(cs, cs); 1418 R32(ss, ss); 1419 R32(ebx, bx); 1420 R32(ecx, cx); 1421 R32(edx, dx); 1422 R32(edi, di); 1423 R32(esi, si); 1424 R32(ebp, bp); 1425 R32(eax, ax); 1426 R32(orig_eax, orig_ax); 1427 R32(eip, ip); 1428 R32(esp, sp); 1429 1430 case offsetof(struct user32, regs.eflags): 1431 *val = get_flags(child); 1432 break; 1433 1434 case offsetof(struct user32, u_debugreg[0]) ... 1435 offsetof(struct user32, u_debugreg[7]): 1436 regno -= offsetof(struct user32, u_debugreg[0]); 1437 *val = ptrace_get_debugreg(child, regno / 4); 1438 break; 1439 1440 default: 1441 if (regno > sizeof(struct user32) || (regno & 3)) 1442 return -EIO; 1443 1444 /* 1445 * Other dummy fields in the virtual user structure 1446 * are ignored 1447 */ 1448 *val = 0; 1449 break; 1450 } 1451 return 0; 1452 } 1453 1454 #undef R32 1455 #undef SEG32 1456 1457 static int genregs32_get(struct task_struct *target, 1458 const struct user_regset *regset, 1459 unsigned int pos, unsigned int count, 1460 void *kbuf, void __user *ubuf) 1461 { 1462 if (kbuf) { 1463 compat_ulong_t *k = kbuf; 1464 while (count >= sizeof(*k)) { 1465 getreg32(target, pos, k++); 1466 count -= sizeof(*k); 1467 pos += sizeof(*k); 1468 } 1469 } else { 1470 compat_ulong_t __user *u = ubuf; 1471 while (count >= sizeof(*u)) { 1472 compat_ulong_t word; 1473 getreg32(target, pos, &word); 1474 if (__put_user(word, u++)) 1475 return -EFAULT; 1476 count -= sizeof(*u); 1477 pos += sizeof(*u); 1478 } 1479 } 1480 1481 return 0; 1482 } 1483 1484 static int genregs32_set(struct task_struct *target, 1485 const struct user_regset *regset, 1486 unsigned int pos, unsigned int count, 1487 const void *kbuf, const void __user *ubuf) 1488 { 1489 int ret = 0; 1490 if (kbuf) { 1491 const compat_ulong_t *k = kbuf; 1492 while (count >= sizeof(*k) && !ret) { 1493 ret = putreg32(target, pos, *k++); 1494 count -= sizeof(*k); 1495 pos += sizeof(*k); 1496 } 1497 } else { 1498 const compat_ulong_t __user *u = ubuf; 1499 while (count >= sizeof(*u) && !ret) { 1500 compat_ulong_t word; 1501 ret = __get_user(word, u++); 1502 if (ret) 1503 break; 1504 ret = putreg32(target, pos, word); 1505 count -= sizeof(*u); 1506 pos += sizeof(*u); 1507 } 1508 } 1509 return ret; 1510 } 1511 1512 long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 1513 compat_ulong_t caddr, 
compat_ulong_t cdata) 1514 { 1515 unsigned long addr = caddr; 1516 unsigned long data = cdata; 1517 void __user *datap = compat_ptr(data); 1518 int ret; 1519 __u32 val; 1520 1521 switch (request) { 1522 case PTRACE_PEEKUSR: 1523 ret = getreg32(child, addr, &val); 1524 if (ret == 0) 1525 ret = put_user(val, (__u32 __user *)datap); 1526 break; 1527 1528 case PTRACE_POKEUSR: 1529 ret = putreg32(child, addr, data); 1530 break; 1531 1532 case PTRACE_GETREGS: /* Get all gp regs from the child. */ 1533 return copy_regset_to_user(child, &user_x86_32_view, 1534 REGSET_GENERAL, 1535 0, sizeof(struct user_regs_struct32), 1536 datap); 1537 1538 case PTRACE_SETREGS: /* Set all gp regs in the child. */ 1539 return copy_regset_from_user(child, &user_x86_32_view, 1540 REGSET_GENERAL, 0, 1541 sizeof(struct user_regs_struct32), 1542 datap); 1543 1544 case PTRACE_GETFPREGS: /* Get the child FPU state. */ 1545 return copy_regset_to_user(child, &user_x86_32_view, 1546 REGSET_FP, 0, 1547 sizeof(struct user_i387_ia32_struct), 1548 datap); 1549 1550 case PTRACE_SETFPREGS: /* Set the child FPU state. */ 1551 return copy_regset_from_user( 1552 child, &user_x86_32_view, REGSET_FP, 1553 0, sizeof(struct user_i387_ia32_struct), datap); 1554 1555 case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ 1556 return copy_regset_to_user(child, &user_x86_32_view, 1557 REGSET_XFP, 0, 1558 sizeof(struct user32_fxsr_struct), 1559 datap); 1560 1561 case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ 1562 return copy_regset_from_user(child, &user_x86_32_view, 1563 REGSET_XFP, 0, 1564 sizeof(struct user32_fxsr_struct), 1565 datap); 1566 1567 case PTRACE_GET_THREAD_AREA: 1568 case PTRACE_SET_THREAD_AREA: 1569 #ifdef CONFIG_X86_PTRACE_BTS 1570 case PTRACE_BTS_CONFIG: 1571 case PTRACE_BTS_STATUS: 1572 case PTRACE_BTS_SIZE: 1573 case PTRACE_BTS_GET: 1574 case PTRACE_BTS_CLEAR: 1575 case PTRACE_BTS_DRAIN: 1576 #endif /* CONFIG_X86_PTRACE_BTS */ 1577 return arch_ptrace(child, request, addr, data); 1578 1579 default: 1580 return compat_ptrace_request(child, request, addr, data); 1581 } 1582 1583 return ret; 1584 } 1585 1586 #endif /* CONFIG_IA32_EMULATION */ 1587 1588 #ifdef CONFIG_X86_64 1589 1590 static const struct user_regset x86_64_regsets[] = { 1591 [REGSET_GENERAL] = { 1592 .core_note_type = NT_PRSTATUS, 1593 .n = sizeof(struct user_regs_struct) / sizeof(long), 1594 .size = sizeof(long), .align = sizeof(long), 1595 .get = genregs_get, .set = genregs_set 1596 }, 1597 [REGSET_FP] = { 1598 .core_note_type = NT_PRFPREG, 1599 .n = sizeof(struct user_i387_struct) / sizeof(long), 1600 .size = sizeof(long), .align = sizeof(long), 1601 .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set 1602 }, 1603 [REGSET_IOPERM64] = { 1604 .core_note_type = NT_386_IOPERM, 1605 .n = IO_BITMAP_LONGS, 1606 .size = sizeof(long), .align = sizeof(long), 1607 .active = ioperm_active, .get = ioperm_get 1608 }, 1609 }; 1610 1611 static const struct user_regset_view user_x86_64_view = { 1612 .name = "x86_64", .e_machine = EM_X86_64, 1613 .regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets) 1614 }; 1615 1616 #else /* CONFIG_X86_32 */ 1617 1618 #define user_regs_struct32 user_regs_struct 1619 #define genregs32_get genregs_get 1620 #define genregs32_set genregs_set 1621 1622 #define user_i387_ia32_struct user_i387_struct 1623 #define user32_fxsr_struct user_fxsr_struct 1624 1625 #endif /* CONFIG_X86_64 */ 1626 1627 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 1628 static const struct user_regset x86_32_regsets[] 
= { 1629 [REGSET_GENERAL] = { 1630 .core_note_type = NT_PRSTATUS, 1631 .n = sizeof(struct user_regs_struct32) / sizeof(u32), 1632 .size = sizeof(u32), .align = sizeof(u32), 1633 .get = genregs32_get, .set = genregs32_set 1634 }, 1635 [REGSET_FP] = { 1636 .core_note_type = NT_PRFPREG, 1637 .n = sizeof(struct user_i387_ia32_struct) / sizeof(u32), 1638 .size = sizeof(u32), .align = sizeof(u32), 1639 .active = fpregs_active, .get = fpregs_get, .set = fpregs_set 1640 }, 1641 [REGSET_XFP] = { 1642 .core_note_type = NT_PRXFPREG, 1643 .n = sizeof(struct user32_fxsr_struct) / sizeof(u32), 1644 .size = sizeof(u32), .align = sizeof(u32), 1645 .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set 1646 }, 1647 [REGSET_TLS] = { 1648 .core_note_type = NT_386_TLS, 1649 .n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN, 1650 .size = sizeof(struct user_desc), 1651 .align = sizeof(struct user_desc), 1652 .active = regset_tls_active, 1653 .get = regset_tls_get, .set = regset_tls_set 1654 }, 1655 [REGSET_IOPERM32] = { 1656 .core_note_type = NT_386_IOPERM, 1657 .n = IO_BITMAP_BYTES / sizeof(u32), 1658 .size = sizeof(u32), .align = sizeof(u32), 1659 .active = ioperm_active, .get = ioperm_get 1660 }, 1661 }; 1662 1663 static const struct user_regset_view user_x86_32_view = { 1664 .name = "i386", .e_machine = EM_386, 1665 .regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets) 1666 }; 1667 #endif 1668 1669 const struct user_regset_view *task_user_regset_view(struct task_struct *task) 1670 { 1671 #ifdef CONFIG_IA32_EMULATION 1672 if (test_tsk_thread_flag(task, TIF_IA32)) 1673 #endif 1674 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 1675 return &user_x86_32_view; 1676 #endif 1677 #ifdef CONFIG_X86_64 1678 return &user_x86_64_view; 1679 #endif 1680 } 1681 1682 static void fill_sigtrap_info(struct task_struct *tsk, 1683 struct pt_regs *regs, 1684 int error_code, int si_code, 1685 struct siginfo *info) 1686 { 1687 tsk->thread.trap_no = 1; 1688 tsk->thread.error_code = error_code; 1689 1690 memset(info, 0, sizeof(*info)); 1691 info->si_signo = SIGTRAP; 1692 info->si_code = si_code; 1693 info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; 1694 } 1695 1696 void user_single_step_siginfo(struct task_struct *tsk, 1697 struct pt_regs *regs, 1698 struct siginfo *info) 1699 { 1700 fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info); 1701 } 1702 1703 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, 1704 int error_code, int si_code) 1705 { 1706 struct siginfo info; 1707 1708 fill_sigtrap_info(tsk, regs, error_code, si_code, &info); 1709 /* Send us the fake SIGTRAP */ 1710 force_sig_info(SIGTRAP, &info, tsk); 1711 } 1712 1713 1714 #ifdef CONFIG_X86_32 1715 # define IS_IA32 1 1716 #elif defined CONFIG_IA32_EMULATION 1717 # define IS_IA32 is_compat_task() 1718 #else 1719 # define IS_IA32 0 1720 #endif 1721 1722 /* 1723 * We must return the syscall number to actually look up in the table. 1724 * This can be -1L to skip running any syscall at all. 1725 */ 1726 asmregparm long syscall_trace_enter(struct pt_regs *regs) 1727 { 1728 long ret = 0; 1729 1730 /* 1731 * If we stepped into a sysenter/syscall insn, it trapped in 1732 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. 1733 * If user-mode had set TF itself, then it's still clear from 1734 * do_debug() and we need to set it again to restore the user 1735 * state. If we entered on the slow path, TF was already set. 
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}

asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	bool step;

	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
			!test_thread_flag(TIF_SYSCALL_EMU);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}
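/*
 * Illustrative sketch only, not part of this file's build: a minimal
 * userspace example of how a debugger exercises the PTRACE_PEEKUSR path
 * handled by arch_ptrace()/getreg() above. The userspace request name
 * PTRACE_PEEKUSER corresponds to the PTRACE_PEEKUSR case here. It assumes
 * a 64-bit host and the glibc <sys/user.h> layout, where struct user
 * starts with struct user_regs_struct, so the offset of 'rip' falls in
 * the register area that arch_ptrace() forwards to getreg().
 *
 *	#include <stddef.h>
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t child = fork();
 *
 *		if (child == 0) {
 *			// child: ask to be traced, then exec; it stops on exec
 *			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *			execl("/bin/true", "true", (char *)NULL);
 *			_exit(1);
 *		}
 *		waitpid(child, NULL, 0);
 *		// read the child's instruction pointer from the USER area
 *		long ip = ptrace(PTRACE_PEEKUSER, child,
 *				 (void *)offsetof(struct user_regs_struct, rip),
 *				 NULL);
 *		printf("child ip: %#lx\n", ip);
 *		ptrace(PTRACE_CONT, child, NULL, NULL);
 *		waitpid(child, NULL, 0);
 *		return 0;
 *	}
 */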