/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/workqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include "tls.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};

/*
 * Does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32	((unsigned long)			\
			 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
			  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
			  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
			  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
			  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
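	/*
	 * Note the deliberate case ordering below: %cs and %ss fall
	 * through into the default case after the null-selector
	 * check, so they share the plain pt_regs store with the
	 * other segment registers.
	 */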
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

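	/*
	 * FS_TLS_SEL/GS_TLS_SEL above are the GDT selectors that
	 * do_arch_prctl() installs when it sets the segment base via
	 * a GDT slot rather than the base MSR (see the fs_base
	 * handling in getreg() below); the equivalence checks keep a
	 * redundant selector write from clobbering that state.
	 */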
	/*
	 * Can't actually change these in 64-bit mode.
	 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE_MAX - 7;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had set it ourselves.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	/*
	 * Orig_ax is really just a flag with small positive and
	 * negative values, so make sure to always sign-extend it
	 * from 32 bits so that it works correctly regardless of
	 * whether we come from a 32-bit environment or not.
	 */
	case offsetof(struct user_regs_struct, orig_ax):
		value = (long) (s32) value;
		break;

	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
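/*
 * Note: genregs_get()/genregs_set() walk the register image one
 * native word at a time, so 'pos' stays a multiple of sizeof(long)
 * and is always a valid getreg()/putreg() register offset.
 */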
/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:		return child->thread.debugreg0;
	case 1:		return child->thread.debugreg1;
	case 2:		return child->thread.debugreg2;
	case 3:		return child->thread.debugreg3;
	case 6:		return child->thread.debugreg6;
	case 7:		return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:		child->thread.debugreg0 = data; break;
	case 1:		child->thread.debugreg1 = data; break;
	case 2:		child->thread.debugreg2 = data; break;
	case 3:		child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data.  Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf.  It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi.  Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 0x10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4.
		 *
		 * Note that LENi == 0x10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554.
		 * See the AMD manual no. 24593 (AMD64 System Programming).
		 */
#ifdef CONFIG_X86_32
#define DR7_MASK	0x5f54
#else
#define DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}
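/*
 * Worked example for the DR7 check above (illustrative values):
 * data = 0x000d0101 gives nibble 0 = (data >> 16) & 0xf = 0xd,
 * i.e. R/W0 = 01b (data write) and LEN0 = 11b (4 bytes), which
 * passes: (0x5f54 >> 0xd) & 1 == 0.  A nibble of 0x2 (R/W = 10b,
 * break on I/O) is rejected by both masks: (0x5f54 >> 0x2) & 1 == 1.
 */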
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

#ifdef CONFIG_X86_PTRACE_BTS
/*
 * A branch trace store context.
 *
 * Contexts may only be installed by ptrace_bts_config() and only for
 * ptraced tasks.
 *
 * Contexts are destroyed when the tracee is detached from the tracer.
 * The actual destruction work requires interrupts enabled, so the
 * work is deferred and will be scheduled during __ptrace_unlink().
 *
 * Contexts hold an additional task_struct reference on the traced
 * task, as well as a reference on the tracer's mm.
 *
 * Ptrace already holds a task_struct for the duration of ptrace operations,
 * but since destruction is deferred, it may be executed after both
 * tracer and tracee exited.
 */
struct bts_context {
	/* The branch trace handle. */
	struct bts_tracer	*tracer;

	/* The buffer used to store the branch trace and its size. */
	void			*buffer;
	unsigned int		size;

	/* The mm that paid for the above buffer. */
	struct mm_struct	*mm;

	/* The task this context belongs to. */
	struct task_struct	*task;

	/* The signal to send on a bts buffer overflow. */
	unsigned int		bts_ovfl_signal;

	/* The work struct to destroy a context. */
	struct work_struct	work;
};

static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
{
	void *buffer = NULL;
	int err = -ENOMEM;

	err = account_locked_memory(current->mm, current->signal->rlim, size);
	if (err < 0)
		return err;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out_refund;

	context->buffer = buffer;
	context->size = size;
	context->mm = get_task_mm(current);

	return 0;

out_refund:
	refund_locked_memory(current->mm, size);
	return err;
}

static inline void free_bts_buffer(struct bts_context *context)
{
	if (!context->buffer)
		return;

	kfree(context->buffer);
	context->buffer = NULL;

	refund_locked_memory(context->mm, context->size);
	context->size = 0;

	mmput(context->mm);
	context->mm = NULL;
}

static void free_bts_context_work(struct work_struct *w)
{
	struct bts_context *context;

	context = container_of(w, struct bts_context, work);

	ds_release_bts(context->tracer);
	put_task_struct(context->task);
	free_bts_buffer(context);
	kfree(context);
}

static inline void free_bts_context(struct bts_context *context)
{
	INIT_WORK(&context->work, free_bts_context_work);
	schedule_work(&context->work);
}

static inline struct bts_context *alloc_bts_context(struct task_struct *task)
{
	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (context) {
		context->task = task;
		task->bts = context;

		get_task_struct(task);
	}

	return context;
}

static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(context->tracer, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}
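/*
 * Illustrative numbers for the wraparound above: with ds.begin = 0,
 * ds.size = 16, ds.n = 4 and ds.top = 48 (just past the record in
 * slot 2), index 0 reads slot 2 (the most recent record), index 2
 * reads slot 0, and index 3 computes -16, falls below ds.begin and
 * is wrapped around to slot 3 by the "+ n * size" adjustment.
 */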
static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;

		error = trace->read(context->tracer, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(context->tracer);
	if (error < 0)
		return error;

	return drained;
}

static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	context = child->bts;
	if (!context)
		context = alloc_bts_context(child);
	if (!context)
		return -ENOMEM;

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		if (!cfg.signal)
			return -EINVAL;

		return -EOPNOTSUPP;
		context->bts_ovfl_signal = cfg.signal;
	}

	ds_release_bts(context->tracer);
	context->tracer = NULL;

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
		int err;

		free_bts_buffer(context);
		if (!cfg.size)
			return 0;

		err = alloc_bts_buffer(context, cfg.size);
		if (err < 0)
			return err;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	context->tracer =
		ds_request_bts_task(child, context->buffer, context->size,
				    NULL, (size_t)-1, flags);
	if (unlikely(IS_ERR(context->tracer))) {
		int error = PTR_ERR(context->tracer);

		free_bts_buffer(context);
		context->tracer = NULL;
		return error;
	}

	return sizeof(cfg);
}
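/*
 * Illustrative tracer-side call sequence (userspace, assuming a
 * stopped tracee; not kernel code): the third ptrace() argument
 * carries the config pointer and the fourth its size, matching the
 * (addr, data) decoding in arch_ptrace() below.
 *
 *	struct ptrace_bts_config cfg = {
 *		.size	= 4096,
 *		.flags	= PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
 *	};
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, (void *)sizeof(cfg));
 */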
static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	context = child->bts;
	if (!context)
		return -ESRCH;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size	= trace->ds.end - trace->ds.begin;
	cfg.signal	= context->bts_ovfl_signal;
	cfg.bts_size	= sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(context->tracer);
}

static int ptrace_bts_size(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}

/*
 * Called from __ptrace_unlink() after the child has been moved back
 * to its original parent.
 */
void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		free_bts_context(child->bts);
		child->bts = NULL;
	}
}
#endif /* CONFIG_X86_PTRACE_BTS */

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);
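	/*
	 * Illustrative (userspace) use of the PTRACE_PEEKUSR case
	 * above, reading the saved instruction pointer; glibc spells
	 * the request PTRACE_PEEKUSER, and 'ip' is the field name in
	 * the kernel's struct user_regs_struct:
	 *
	 *	errno = 0;
	 *	val = ptrace(PTRACE_PEEKUSER, pid,
	 *		     offsetof(struct user_regs_struct, ip), NULL);
	 *	if (val == -1 && errno != 0)
	 *		... error ...
	 */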
#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)						\
	case offsetof(struct user32, regs.l):			\
		regs->q = value; break

#define SEG32(rs)						\
	case offsetof(struct user32, regs.rs):			\
		return set_segment_reg(child,			\
				       offsetof(struct user_regs_struct, rs), \
				       value);			\
		break
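/*
 * R32(l, q) maps the i386 user32 register name 'l' (e.g. ebx) onto
 * the corresponding 64-bit pt_regs field 'q' (e.g. bx); SEG32(rs)
 * routes segment registers through set_segment_reg() (and, after
 * the redefinition below, get_segment_reg()) instead.
 */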
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Sign-extend the value so that orig_eax = -1
		 * causes (long)orig_ax < 0 tests to fire correctly.
		 */
		regs->orig_ax = (long) (s32) value;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)						\
	case offsetof(struct user32, regs.l):			\
		*val = regs->q; break

#define SEG32(rs)						\
	case offsetof(struct user32, regs.rs):			\
		*val = get_segment_reg(child,			\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif /* CONFIG_X86_PTRACE_BTS */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */
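/*
 * Register sets exported for the 32-bit ABI.  These back native
 * 32-bit ptrace, the compat path under CONFIG_IA32_EMULATION, and
 * the ELF core-dump notes (via .core_note_type) for i386 tasks.
 */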
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}
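/*
 * IS_IA32 selects the audit arch classifier used below: a constant 1
 * on a native 32-bit kernel, a runtime is_compat_task() check when
 * 32-bit emulation is built into a 64-bit kernel, and 0 otherwise.
 */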
#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}

asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}