/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#ifdef COMPAT_IA32
#include <sys/procfs.h>
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>

struct ptrace_io_desc32 {
	int		piod_op;
	u_int32_t	piod_offs;
	u_int32_t	piod_addr;
	u_int32_t	piod_len;
};
#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)
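/*
 * For illustration: in proc_read_regs() below,
 * PROC_ACTION(fill_regs(td, regs)) expands to roughly
 *
 *	do {
 *		int error;
 *
 *		PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 *		if ((td->td_proc->p_flag & P_INMEM) == 0)
 *			error = EIO;
 *		else
 *			error = fill_regs(td, regs);
 *		return (error);
 *	} while(0);
 *
 * i.e. the caller must hold the process lock, and EIO is returned if
 * the target is not resident in memory (P_INMEM clear).
 */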
int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_IA32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
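	/*
	 * In outline, each pass of the loop below handles at most one
	 * page of the request: fault the page in, look up its vm_page,
	 * hold it so it cannot be reclaimed, uiomove the data, then
	 * drop the hold and repeat while uio_resid is non-zero.
	 */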
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;	/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, fault_flags);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_IA32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *	COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *	copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *	copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_IA32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_IA32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value, uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_IA32
/*
 *	PROC_READ(regs, td2, addr);
 * becomes either:
 *	proc_read_regs(td2, addr);
 * or
 *	proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif
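/*
 * For illustration: with COMPAT_IA32, PROC_WRITE(regs, td2, addr)
 * expands to roughly
 *
 *	wrap32 ? (safe ? proc_write_regs32(td2, addr) : EINVAL)
 *	       : proc_write_regs(td2, addr)
 *
 * so a 32 bit debugger that tries to write the register set of a
 * 64 bit target gets EINVAL.
 */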
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_IA32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				if (td2 != NULL)
					break;	/* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG(process, p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_IA32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (td2->td_proc->p_sysent->sv_flags & SV_ILP32)
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error)
				break;
		}

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3) {
					td3->td_dbgflags &= ~TDB_SUSPEND;
				}
			}
			/*
			 * Unsuspend all threads.  To keep a thread from
			 * running after the continue, the debugger must
			 * suspend it with PT_SUSPEND before continuing
			 * the process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		} else {
			if (data)
				psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_IA32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_IA32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_IA32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 || data > sizeof(*pl)) {
			error = EINVAL;
			break;
		}
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		if (td2->td_dbgflags & TDB_XSIG)
			pl->pl_event = PL_EVENT_SIGNAL;
		else
			pl->pl_event = 0;
		pl->pl_flags = 0;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}