/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};

/*
 * Does not yet catch signals sent when the child dies;
 * that needs to be handled in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	regno >>= 2;
	if (regno > FS)
		--regno;
	return &regs->bx + regno;
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		retval = task->thread.gs;
		if (task == current)
			savesegment(gs, retval);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		/* Fall through: store %cs/%ss like any other register. */

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		task->thread.gs = value;
		if (task == current)
			/*
			 * The user-mode %gs is not affected by
			 * kernel entry, so we must update the CPU.
			 */
			loadsegment(gs, value);
	}

	return 0;
}
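
/*
 * Illustrative userspace sketch (not part of this file): roughly how a
 * 32-bit debugger exercises set_segment_reg() above via PTRACE_POKEUSR,
 * assuming pid is an already-attached, stopped child.  A null %gs is
 * accepted, while a null %cs or %ss is rejected with -EIO as enforced
 * above.
 *
 *	#include <stddef.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user_regs_struct, gs), 0);	// succeeds
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user_regs_struct, ss), 0);	// fails, -EIO
 */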

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct, cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE64 - 7;
}

#endif	/* CONFIG_X86_32 */
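
/*
 * Illustrative sequence (userspace sketch, not kernel code) for the
 * "leave it" checks in the 64-bit set_segment_reg() above: depending
 * on the base value, do_arch_prctl() installs an fs base either via
 * the MSR (selector stays 0) or via the FS_TLS GDT slot (selector
 * reads back as FS_TLS_SEL).  A debugger that saves all registers of
 * a stopped child and writes them back unchanged must not tear down
 * whichever arrangement was chosen; those checks skip exactly the two
 * aliased selector values so the write-back is a no-op.
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user_regs_struct, fs_base), base);
 *	// selector now reads back as 0 or FS_TLS_SEL; writing that
 *	// same value back leaves the installed base untouched:
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user_regs_struct, fs), 0);
 */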

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	/*
	 * Orig_ax is really just a flag with small positive and
	 * negative values, so make sure to always sign-extend it
	 * from 32 bits so that it works correctly regardless of
	 * whether we come from a 32-bit environment or not.
	 */
	case offsetof(struct user_regs_struct, orig_ax):
		value = (long) (s32) value;
		break;

	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}
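
/*
 * Worked example for the orig_ax sign extension above (illustrative):
 * a 32-bit debugger that wants to cancel a syscall pokes orig_eax
 * with 0xffffffff.  Without the cast, the 64-bit kernel would store
 * 0x00000000ffffffff and (long)orig_ax < 0 tests would not fire;
 * with it, the stored value is -1L as intended:
 *
 *	unsigned long value = 0xffffffffUL;
 *	value = (long) (s32) value;	// value == (unsigned long)-1L
 */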

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
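
/*
 * Illustrative sketch (userspace, not kernel code): the regset
 * accessors above back PTRACE_GETREGS/PTRACE_SETREGS, with getreg()
 * and putreg() invoked once per word as pos advances.  A tracer can
 * round-trip the whole block, assuming a stopped child pid:
 *
 *	struct user_regs_struct regs;
 *
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *	regs.ip += 2;			// e.g. skip a 2-byte instruction
 *	ptrace(PTRACE_SETREGS, pid, NULL, &regs);
 */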

/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:	return child->thread.debugreg0;
	case 1:	return child->thread.debugreg1;
	case 2:	return child->thread.debugreg2;
	case 3:	return child->thread.debugreg3;
	case 6:	return child->thread.debugreg6;
	case 7:	return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:	child->thread.debugreg0 = data; break;
	case 1:	child->thread.debugreg1 = data; break;
	case 2:	child->thread.debugreg2 = data; break;
	case 3:	child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data.  Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf.  It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi.  Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are (LENi/R-Wi in binary):
		 * - LENi == 10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 00 && LENi != 00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4.
		 *
		 * Note that LENi == 10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554.
		 * See the AMD manual no. 24593 (AMD64 System Programming).
		 */
#ifdef CONFIG_X86_32
#define DR7_MASK	0x5f54
#else
#define DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}
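
/*
 * Worked example for the DR7 check above (illustrative): to watch 4
 * bytes for writes with breakpoint 0, a debugger encodes R/W0 = 01
 * (write) and LEN0 = 11 (4 bytes), so the half-byte at bits 16-19 is
 * 1101 binary = 0xd.  (0x5f54 >> 0xd) & 1 == 0, so the value is
 * accepted.  An I/O breakpoint (R/W0 = 10, half-byte e.g. 0x2) gives
 * (0x5f54 >> 0x2) & 1 == 1 and is rejected with -EIO.
 */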

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

#ifdef CONFIG_X86_PTRACE_BTS
static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(child->bts, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}
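
/*
 * Worked example for the index arithmetic above (illustrative): with
 * n = 4 records of size sz and top pointing one record past the
 * newest entry, index 0 addresses the newest record at top - sz and
 * index 3 the oldest.  If top - (index + 1) * sz falls below begin,
 * the BTS buffer has wrapped, and adding n * sz moves the pointer
 * back to the tail of the circular buffer.
 */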

static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;

		error = trace->read(child->bts, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(child->bts);
	if (error < 0)
		return error;

	return drained;
}

static int ptrace_bts_allocate_buffer(struct task_struct *child, size_t size)
{
	child->bts_buffer = alloc_locked_buffer(size);
	if (!child->bts_buffer)
		return -ENOMEM;

	child->bts_size = size;

	return 0;
}

static void ptrace_bts_free_buffer(struct task_struct *child)
{
	free_locked_buffer(child->bts_buffer, child->bts_size);
	child->bts_buffer = NULL;
	child->bts_size = 0;
}

static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	if (child->bts) {
		ds_release_bts(child->bts);
		child->bts = NULL;
	}

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		if (!cfg.signal)
			return -EINVAL;

		/*
		 * Overflow-signal notification is not implemented
		 * yet; the assignment below is deliberately
		 * unreachable.
		 */
		return -EOPNOTSUPP;

		child->thread.bts_ovfl_signal = cfg.signal;
	}

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
	    (cfg.size != child->bts_size)) {
		int error;

		ptrace_bts_free_buffer(child);

		error = ptrace_bts_allocate_buffer(child, cfg.size);
		if (error < 0)
			return error;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	child->bts = ds_request_bts(child, child->bts_buffer, child->bts_size,
				    /* ovfl = */ NULL, /* th = */ (size_t)-1,
				    flags);
	if (IS_ERR(child->bts)) {
		int error = PTR_ERR(child->bts);

		ptrace_bts_free_buffer(child);
		child->bts = NULL;

		return error;
	}

	return sizeof(cfg);
}

static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size = trace->ds.end - trace->ds.begin;
	cfg.signal = child->thread.bts_ovfl_signal;
	cfg.bts_size = sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	const struct bts_trace *trace;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(child->bts);
}

static int ptrace_bts_size(struct task_struct *child)
{
	const struct bts_trace *trace;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}

static void ptrace_bts_fork(struct task_struct *tsk)
{
	tsk->bts = NULL;
	tsk->bts_buffer = NULL;
	tsk->bts_size = 0;
	tsk->thread.bts_ovfl_signal = 0;
}

static void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		ds_release_bts(child->bts);
		child->bts = NULL;

		/*
		 * We cannot update total_vm and locked_vm since the
		 * child's mm is already gone.  But we can reclaim the
		 * memory.
		 */
		kfree(child->bts_buffer);
		child->bts_buffer = NULL;
		child->bts_size = 0;
	}
}

static void ptrace_bts_detach(struct task_struct *child)
{
	/*
	 * Ptrace_detach() races with ptrace_untrace() in case
	 * the child dies and is reaped by another thread.
	 *
	 * We only do the memory accounting at this point and
	 * leave the buffer deallocation and the bts tracer
	 * release to ptrace_bts_untrace() which will be called
	 * later on with tasklist_lock held.
	 */
	release_locked_buffer(child->bts_buffer, child->bts_size);
}
#else
static inline void ptrace_bts_fork(struct task_struct *tsk) {}
static inline void ptrace_bts_detach(struct task_struct *child) {}
static inline void ptrace_bts_untrace(struct task_struct *child) {}
#endif /* CONFIG_X86_PTRACE_BTS */
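
/*
 * Illustrative tracer-side sketch (userspace, not kernel code) of the
 * BTS requests implemented above: allocate a locked buffer and enable
 * branch tracing, then fetch the newest record.  As dispatched in
 * arch_ptrace() below, the addr argument carries the user pointer and
 * the data argument carries the size (for PTRACE_BTS_CONFIG) or the
 * record index (for PTRACE_BTS_GET).
 *
 *	struct ptrace_bts_config cfg = {
 *		.size	= 4096,
 *		.flags	= PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
 *	};
 *	struct bts_struct record;
 *
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));
 *	// ... let the child run, then stop it ...
 *	ptrace(PTRACE_BTS_GET, pid, &record, 0);	// index 0 = newest
 */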

void x86_ptrace_fork(struct task_struct *child, unsigned long clone_flags)
{
	ptrace_bts_fork(child);
}

void x86_ptrace_untrace(struct task_struct *child)
{
	ptrace_bts_untrace(child);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
	ptrace_bts_detach(child);
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/*
	 * Normal 64-bit interface to access TLS data.
	 * Works just like arch_prctl, except that the arguments
	 * are reversed.
	 */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
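
/*
 * Worked example for the PTRACE_PEEKUSR debug-register path above
 * (illustrative, userspace): u_debugreg[] lives past the register
 * block in struct user, so the offset is rebased and divided by the
 * word size to recover the register number.  With glibc's ptrace()
 * wrapper, the peeked word comes back as the return value:
 *
 *	unsigned long dr6 = ptrace(PTRACE_PEEKUSER, pid,
 *				   offsetof(struct user, u_debugreg[6]),
 *				   NULL);
 */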

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q) \
	case offsetof(struct user32, regs.l): \
		regs->q = value; break

#define SEG32(rs) \
	case offsetof(struct user32, regs.rs): \
		return set_segment_reg(child, \
				       offsetof(struct user_regs_struct, rs), \
				       value); \
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Sign-extend the value so that orig_eax = -1
		 * causes (long)orig_ax < 0 tests to fire correctly.
		 */
		regs->orig_ax = (long) (s32) value;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}
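
/*
 * For reference (illustrative): each R32()/SEG32() use above expands
 * to an ordinary case label, mapping a 32-bit user32 offset onto the
 * native 64-bit pt_regs field.  R32(ebx, bx), for instance, becomes:
 *
 *	case offsetof(struct user32, regs.ebx):
 *		regs->bx = value; break;
 */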

#undef R32
#undef SEG32

#define R32(l,q) \
	case offsetof(struct user32, regs.l): \
		*val = regs->q; break

#define SEG32(rs) \
	case offsetof(struct user32, regs.rs): \
		*val = get_segment_reg(child, \
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif /* CONFIG_X86_PTRACE_BTS */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif /* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}

asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}
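
/*
 * Illustrative tracer loop (userspace sketch, not kernel code) for the
 * syscall_trace_enter()/syscall_trace_leave() pair above: under
 * PTRACE_SYSCALL the child stops once on syscall entry and once on
 * exit, and the tracer can inspect orig_eax/orig_rax at each stop.
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, NULL, NULL);  // run to entry
 *		waitpid(pid, &status, 0);
 *		if (WIFEXITED(status))
 *			break;
 *		// PTRACE_PEEKUSER of orig_eax/orig_rax gives the number
 *		ptrace(PTRACE_SYSCALL, pid, NULL, NULL);  // run to exit
 *		waitpid(pid, &status, 0);
 *		if (WIFEXITED(status))
 *			break;
 *	}
 */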