1 /*- 2 * Copyright (c) 1994, Sean Eric Fagan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Sean Eric Fagan. 16 * 4. The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_compat.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/lock.h> 40 #include <sys/mutex.h> 41 #include <sys/syscallsubr.h> 42 #include <sys/sysent.h> 43 #include <sys/sysproto.h> 44 #include <sys/pioctl.h> 45 #include <sys/priv.h> 46 #include <sys/proc.h> 47 #include <sys/vnode.h> 48 #include <sys/ptrace.h> 49 #include <sys/rwlock.h> 50 #include <sys/sx.h> 51 #include <sys/malloc.h> 52 #include <sys/signalvar.h> 53 54 #include <machine/reg.h> 55 56 #include <security/audit/audit.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_extern.h> 61 #include <vm/vm_map.h> 62 #include <vm/vm_kern.h> 63 #include <vm/vm_object.h> 64 #include <vm/vm_page.h> 65 #include <vm/vm_param.h> 66 67 #ifdef COMPAT_FREEBSD32 68 #include <sys/procfs.h> 69 #include <compat/freebsd32/freebsd32_signal.h> 70 71 struct ptrace_io_desc32 { 72 int piod_op; 73 uint32_t piod_offs; 74 uint32_t piod_addr; 75 uint32_t piod_len; 76 }; 77 78 struct ptrace_vm_entry32 { 79 int pve_entry; 80 int pve_timestamp; 81 uint32_t pve_start; 82 uint32_t pve_end; 83 uint32_t pve_offset; 84 u_int pve_prot; 85 u_int pve_pathlen; 86 int32_t pve_fileid; 87 u_int pve_fsid; 88 uint32_t pve_path; 89 }; 90 91 struct ptrace_lwpinfo32 { 92 lwpid_t pl_lwpid; /* LWP described. */ 93 int pl_event; /* Event that stopped the LWP. */ 94 int pl_flags; /* LWP flags. */ 95 sigset_t pl_sigmask; /* LWP signal mask */ 96 sigset_t pl_siglist; /* LWP pending signal */ 97 struct siginfo32 pl_siginfo; /* siginfo for signal */ 98 char pl_tdname[MAXCOMLEN + 1]; /* LWP name. */ 99 pid_t pl_child_pid; /* New child pid */ 100 u_int pl_syscall_code; 101 u_int pl_syscall_narg; 102 }; 103 104 #endif 105 106 /* 107 * Functions implemented using PROC_ACTION(): 108 * 109 * proc_read_regs(proc, regs) 110 * Get the current user-visible register set from the process 111 * and copy it into the regs structure (<machine/reg.h>). 112 * The process is stopped at the time read_regs is called. 113 * 114 * proc_write_regs(proc, regs) 115 * Update the current register set from the passed in regs 116 * structure. Take care to avoid clobbering special CPU 117 * registers or privileged bits in the PSL. 118 * Depending on the architecture this may have fix-up work to do, 119 * especially if the IAR or PCW are modified. 120 * The process is stopped at the time write_regs is called. 121 * 122 * proc_read_fpregs, proc_write_fpregs 123 * deal with the floating point register set, otherwise as above. 124 * 125 * proc_read_dbregs, proc_write_dbregs 126 * deal with the processor debug register set, otherwise as above. 127 * 128 * proc_sstep(proc) 129 * Arrange for the process to trap after executing a single instruction. 130 */ 131 132 #define PROC_ACTION(action) do { \ 133 int error; \ 134 \ 135 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \ 136 if ((td->td_proc->p_flag & P_INMEM) == 0) \ 137 error = EIO; \ 138 else \ 139 error = (action); \ 140 return (error); \ 141 } while(0) 142 143 int 144 proc_read_regs(struct thread *td, struct reg *regs) 145 { 146 147 PROC_ACTION(fill_regs(td, regs)); 148 } 149 150 int 151 proc_write_regs(struct thread *td, struct reg *regs) 152 { 153 154 PROC_ACTION(set_regs(td, regs)); 155 } 156 157 int 158 proc_read_dbregs(struct thread *td, struct dbreg *dbregs) 159 { 160 161 PROC_ACTION(fill_dbregs(td, dbregs)); 162 } 163 164 int 165 proc_write_dbregs(struct thread *td, struct dbreg *dbregs) 166 { 167 168 PROC_ACTION(set_dbregs(td, dbregs)); 169 } 170 171 /* 172 * Ptrace doesn't support fpregs at all, and there are no security holes 173 * or translations for fpregs, so we can just copy them. 174 */ 175 int 176 proc_read_fpregs(struct thread *td, struct fpreg *fpregs) 177 { 178 179 PROC_ACTION(fill_fpregs(td, fpregs)); 180 } 181 182 int 183 proc_write_fpregs(struct thread *td, struct fpreg *fpregs) 184 { 185 186 PROC_ACTION(set_fpregs(td, fpregs)); 187 } 188 189 #ifdef COMPAT_FREEBSD32 190 /* For 32 bit binaries, we need to expose the 32 bit regs layouts. */ 191 int 192 proc_read_regs32(struct thread *td, struct reg32 *regs32) 193 { 194 195 PROC_ACTION(fill_regs32(td, regs32)); 196 } 197 198 int 199 proc_write_regs32(struct thread *td, struct reg32 *regs32) 200 { 201 202 PROC_ACTION(set_regs32(td, regs32)); 203 } 204 205 int 206 proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 207 { 208 209 PROC_ACTION(fill_dbregs32(td, dbregs32)); 210 } 211 212 int 213 proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 214 { 215 216 PROC_ACTION(set_dbregs32(td, dbregs32)); 217 } 218 219 int 220 proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 221 { 222 223 PROC_ACTION(fill_fpregs32(td, fpregs32)); 224 } 225 226 int 227 proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 228 { 229 230 PROC_ACTION(set_fpregs32(td, fpregs32)); 231 } 232 #endif 233 234 int 235 proc_sstep(struct thread *td) 236 { 237 238 PROC_ACTION(ptrace_single_step(td)); 239 } 240 241 int 242 proc_rwmem(struct proc *p, struct uio *uio) 243 { 244 vm_map_t map; 245 vm_offset_t pageno; /* page number */ 246 vm_prot_t reqprot; 247 int error, fault_flags, page_offset, writing; 248 249 /* 250 * Assert that someone has locked this vmspace. (Should be 251 * curthread but we can't assert that.) This keeps the process 252 * from exiting out from under us until this operation completes. 253 */ 254 PROC_ASSERT_HELD(p); 255 PROC_LOCK_ASSERT(p, MA_NOTOWNED); 256 257 /* 258 * The map we want... 259 */ 260 map = &p->p_vmspace->vm_map; 261 262 /* 263 * If we are writing, then we request vm_fault() to create a private 264 * copy of each page. Since these copies will not be writeable by the 265 * process, we must explicity request that they be dirtied. 266 */ 267 writing = uio->uio_rw == UIO_WRITE; 268 reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ; 269 fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL; 270 271 /* 272 * Only map in one page at a time. We don't have to, but it 273 * makes things easier. This way is trivial - right? 274 */ 275 do { 276 vm_offset_t uva; 277 u_int len; 278 vm_page_t m; 279 280 uva = (vm_offset_t)uio->uio_offset; 281 282 /* 283 * Get the page number of this segment. 284 */ 285 pageno = trunc_page(uva); 286 page_offset = uva - pageno; 287 288 /* 289 * How many bytes to copy 290 */ 291 len = min(PAGE_SIZE - page_offset, uio->uio_resid); 292 293 /* 294 * Fault and hold the page on behalf of the process. 295 */ 296 error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m); 297 if (error != KERN_SUCCESS) { 298 if (error == KERN_RESOURCE_SHORTAGE) 299 error = ENOMEM; 300 else 301 error = EFAULT; 302 break; 303 } 304 305 /* 306 * Now do the i/o move. 307 */ 308 error = uiomove_fromphys(&m, page_offset, len, uio); 309 310 /* Make the I-cache coherent for breakpoints. */ 311 if (writing && error == 0) { 312 vm_map_lock_read(map); 313 if (vm_map_check_protection(map, pageno, pageno + 314 PAGE_SIZE, VM_PROT_EXECUTE)) 315 vm_sync_icache(map, uva, len); 316 vm_map_unlock_read(map); 317 } 318 319 /* 320 * Release the page. 321 */ 322 vm_page_lock(m); 323 vm_page_unhold(m); 324 vm_page_unlock(m); 325 326 } while (error == 0 && uio->uio_resid > 0); 327 328 return (error); 329 } 330 331 static ssize_t 332 proc_iop(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 333 size_t len, enum uio_rw rw) 334 { 335 struct iovec iov; 336 struct uio uio; 337 ssize_t slen; 338 int error; 339 340 MPASS(len < SSIZE_MAX); 341 slen = (ssize_t)len; 342 343 iov.iov_base = (caddr_t)buf; 344 iov.iov_len = len; 345 uio.uio_iov = &iov; 346 uio.uio_iovcnt = 1; 347 uio.uio_offset = va; 348 uio.uio_resid = slen; 349 uio.uio_segflg = UIO_SYSSPACE; 350 uio.uio_rw = rw; 351 uio.uio_td = td; 352 error = proc_rwmem(p, &uio); 353 if (uio.uio_resid == slen) 354 return (-1); 355 return (slen - uio.uio_resid); 356 } 357 358 ssize_t 359 proc_readmem(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 360 size_t len) 361 { 362 363 return (proc_iop(td, p, va, buf, len, UIO_READ)); 364 } 365 366 ssize_t 367 proc_writemem(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 368 size_t len) 369 { 370 371 return (proc_iop(td, p, va, buf, len, UIO_WRITE)); 372 } 373 374 static int 375 ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve) 376 { 377 struct vattr vattr; 378 vm_map_t map; 379 vm_map_entry_t entry; 380 vm_object_t obj, tobj, lobj; 381 struct vmspace *vm; 382 struct vnode *vp; 383 char *freepath, *fullpath; 384 u_int pathlen; 385 int error, index; 386 387 error = 0; 388 obj = NULL; 389 390 vm = vmspace_acquire_ref(p); 391 map = &vm->vm_map; 392 vm_map_lock_read(map); 393 394 do { 395 entry = map->header.next; 396 index = 0; 397 while (index < pve->pve_entry && entry != &map->header) { 398 entry = entry->next; 399 index++; 400 } 401 if (index != pve->pve_entry) { 402 error = EINVAL; 403 break; 404 } 405 while (entry != &map->header && 406 (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { 407 entry = entry->next; 408 index++; 409 } 410 if (entry == &map->header) { 411 error = ENOENT; 412 break; 413 } 414 415 /* We got an entry. */ 416 pve->pve_entry = index + 1; 417 pve->pve_timestamp = map->timestamp; 418 pve->pve_start = entry->start; 419 pve->pve_end = entry->end - 1; 420 pve->pve_offset = entry->offset; 421 pve->pve_prot = entry->protection; 422 423 /* Backing object's path needed? */ 424 if (pve->pve_pathlen == 0) 425 break; 426 427 pathlen = pve->pve_pathlen; 428 pve->pve_pathlen = 0; 429 430 obj = entry->object.vm_object; 431 if (obj != NULL) 432 VM_OBJECT_RLOCK(obj); 433 } while (0); 434 435 vm_map_unlock_read(map); 436 437 pve->pve_fsid = VNOVAL; 438 pve->pve_fileid = VNOVAL; 439 440 if (error == 0 && obj != NULL) { 441 lobj = obj; 442 for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) { 443 if (tobj != obj) 444 VM_OBJECT_RLOCK(tobj); 445 if (lobj != obj) 446 VM_OBJECT_RUNLOCK(lobj); 447 lobj = tobj; 448 pve->pve_offset += tobj->backing_object_offset; 449 } 450 vp = vm_object_vnode(lobj); 451 if (vp != NULL) 452 vref(vp); 453 if (lobj != obj) 454 VM_OBJECT_RUNLOCK(lobj); 455 VM_OBJECT_RUNLOCK(obj); 456 457 if (vp != NULL) { 458 freepath = NULL; 459 fullpath = NULL; 460 vn_fullpath(td, vp, &fullpath, &freepath); 461 vn_lock(vp, LK_SHARED | LK_RETRY); 462 if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) { 463 pve->pve_fileid = vattr.va_fileid; 464 pve->pve_fsid = vattr.va_fsid; 465 } 466 vput(vp); 467 468 if (fullpath != NULL) { 469 pve->pve_pathlen = strlen(fullpath) + 1; 470 if (pve->pve_pathlen <= pathlen) { 471 error = copyout(fullpath, pve->pve_path, 472 pve->pve_pathlen); 473 } else 474 error = ENAMETOOLONG; 475 } 476 if (freepath != NULL) 477 free(freepath, M_TEMP); 478 } 479 } 480 vmspace_free(vm); 481 if (error == 0) 482 CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p", 483 p->p_pid, pve->pve_entry, pve->pve_start); 484 485 return (error); 486 } 487 488 #ifdef COMPAT_FREEBSD32 489 static int 490 ptrace_vm_entry32(struct thread *td, struct proc *p, 491 struct ptrace_vm_entry32 *pve32) 492 { 493 struct ptrace_vm_entry pve; 494 int error; 495 496 pve.pve_entry = pve32->pve_entry; 497 pve.pve_pathlen = pve32->pve_pathlen; 498 pve.pve_path = (void *)(uintptr_t)pve32->pve_path; 499 500 error = ptrace_vm_entry(td, p, &pve); 501 if (error == 0) { 502 pve32->pve_entry = pve.pve_entry; 503 pve32->pve_timestamp = pve.pve_timestamp; 504 pve32->pve_start = pve.pve_start; 505 pve32->pve_end = pve.pve_end; 506 pve32->pve_offset = pve.pve_offset; 507 pve32->pve_prot = pve.pve_prot; 508 pve32->pve_fileid = pve.pve_fileid; 509 pve32->pve_fsid = pve.pve_fsid; 510 } 511 512 pve32->pve_pathlen = pve.pve_pathlen; 513 return (error); 514 } 515 516 static void 517 ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl, 518 struct ptrace_lwpinfo32 *pl32) 519 { 520 521 pl32->pl_lwpid = pl->pl_lwpid; 522 pl32->pl_event = pl->pl_event; 523 pl32->pl_flags = pl->pl_flags; 524 pl32->pl_sigmask = pl->pl_sigmask; 525 pl32->pl_siglist = pl->pl_siglist; 526 siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo); 527 strcpy(pl32->pl_tdname, pl->pl_tdname); 528 pl32->pl_child_pid = pl->pl_child_pid; 529 pl32->pl_syscall_code = pl->pl_syscall_code; 530 pl32->pl_syscall_narg = pl->pl_syscall_narg; 531 } 532 #endif /* COMPAT_FREEBSD32 */ 533 534 /* 535 * Process debugging system call. 536 */ 537 #ifndef _SYS_SYSPROTO_H_ 538 struct ptrace_args { 539 int req; 540 pid_t pid; 541 caddr_t addr; 542 int data; 543 }; 544 #endif 545 546 #ifdef COMPAT_FREEBSD32 547 /* 548 * This CPP subterfuge is to try and reduce the number of ifdefs in 549 * the body of the code. 550 * COPYIN(uap->addr, &r.reg, sizeof r.reg); 551 * becomes either: 552 * copyin(uap->addr, &r.reg, sizeof r.reg); 553 * or 554 * copyin(uap->addr, &r.reg32, sizeof r.reg32); 555 * .. except this is done at runtime. 556 */ 557 #define COPYIN(u, k, s) wrap32 ? \ 558 copyin(u, k ## 32, s ## 32) : \ 559 copyin(u, k, s) 560 #define COPYOUT(k, u, s) wrap32 ? \ 561 copyout(k ## 32, u, s ## 32) : \ 562 copyout(k, u, s) 563 #else 564 #define COPYIN(u, k, s) copyin(u, k, s) 565 #define COPYOUT(k, u, s) copyout(k, u, s) 566 #endif 567 int 568 sys_ptrace(struct thread *td, struct ptrace_args *uap) 569 { 570 /* 571 * XXX this obfuscation is to reduce stack usage, but the register 572 * structs may be too large to put on the stack anyway. 573 */ 574 union { 575 struct ptrace_io_desc piod; 576 struct ptrace_lwpinfo pl; 577 struct ptrace_vm_entry pve; 578 struct dbreg dbreg; 579 struct fpreg fpreg; 580 struct reg reg; 581 #ifdef COMPAT_FREEBSD32 582 struct dbreg32 dbreg32; 583 struct fpreg32 fpreg32; 584 struct reg32 reg32; 585 struct ptrace_io_desc32 piod32; 586 struct ptrace_lwpinfo32 pl32; 587 struct ptrace_vm_entry32 pve32; 588 #endif 589 char args[nitems(td->td_sa.args) * sizeof(register_t)]; 590 int ptevents; 591 } r; 592 void *addr; 593 int error = 0; 594 #ifdef COMPAT_FREEBSD32 595 int wrap32 = 0; 596 597 if (SV_CURPROC_FLAG(SV_ILP32)) 598 wrap32 = 1; 599 #endif 600 AUDIT_ARG_PID(uap->pid); 601 AUDIT_ARG_CMD(uap->req); 602 AUDIT_ARG_VALUE(uap->data); 603 addr = &r; 604 switch (uap->req) { 605 case PT_GET_EVENT_MASK: 606 case PT_GETREGS: 607 case PT_GETFPREGS: 608 case PT_GETDBREGS: 609 case PT_LWPINFO: 610 case PT_GET_SC_ARGS: 611 break; 612 case PT_SETREGS: 613 error = COPYIN(uap->addr, &r.reg, sizeof r.reg); 614 break; 615 case PT_SETFPREGS: 616 error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg); 617 break; 618 case PT_SETDBREGS: 619 error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg); 620 break; 621 case PT_SET_EVENT_MASK: 622 if (uap->data != sizeof(r.ptevents)) 623 error = EINVAL; 624 else 625 error = copyin(uap->addr, &r.ptevents, uap->data); 626 break; 627 case PT_IO: 628 error = COPYIN(uap->addr, &r.piod, sizeof r.piod); 629 break; 630 case PT_VM_ENTRY: 631 error = COPYIN(uap->addr, &r.pve, sizeof r.pve); 632 break; 633 default: 634 addr = uap->addr; 635 break; 636 } 637 if (error) 638 return (error); 639 640 error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data); 641 if (error) 642 return (error); 643 644 switch (uap->req) { 645 case PT_VM_ENTRY: 646 error = COPYOUT(&r.pve, uap->addr, sizeof r.pve); 647 break; 648 case PT_IO: 649 error = COPYOUT(&r.piod, uap->addr, sizeof r.piod); 650 break; 651 case PT_GETREGS: 652 error = COPYOUT(&r.reg, uap->addr, sizeof r.reg); 653 break; 654 case PT_GETFPREGS: 655 error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg); 656 break; 657 case PT_GETDBREGS: 658 error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg); 659 break; 660 case PT_GET_EVENT_MASK: 661 /* NB: The size in uap->data is validated in kern_ptrace(). */ 662 error = copyout(&r.ptevents, uap->addr, uap->data); 663 break; 664 case PT_LWPINFO: 665 /* NB: The size in uap->data is validated in kern_ptrace(). */ 666 error = copyout(&r.pl, uap->addr, uap->data); 667 break; 668 case PT_GET_SC_ARGS: 669 error = copyout(r.args, uap->addr, MIN(uap->data, 670 sizeof(r.args))); 671 break; 672 } 673 674 return (error); 675 } 676 #undef COPYIN 677 #undef COPYOUT 678 679 #ifdef COMPAT_FREEBSD32 680 /* 681 * PROC_READ(regs, td2, addr); 682 * becomes either: 683 * proc_read_regs(td2, addr); 684 * or 685 * proc_read_regs32(td2, addr); 686 * .. except this is done at runtime. There is an additional 687 * complication in that PROC_WRITE disallows 32 bit consumers 688 * from writing to 64 bit address space targets. 689 */ 690 #define PROC_READ(w, t, a) wrap32 ? \ 691 proc_read_ ## w ## 32(t, a) : \ 692 proc_read_ ## w (t, a) 693 #define PROC_WRITE(w, t, a) wrap32 ? \ 694 (safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \ 695 proc_write_ ## w (t, a) 696 #else 697 #define PROC_READ(w, t, a) proc_read_ ## w (t, a) 698 #define PROC_WRITE(w, t, a) proc_write_ ## w (t, a) 699 #endif 700 701 void 702 proc_set_traced(struct proc *p, bool stop) 703 { 704 705 PROC_LOCK_ASSERT(p, MA_OWNED); 706 p->p_flag |= P_TRACED; 707 if (stop) 708 p->p_flag2 |= P2_PTRACE_FSTP; 709 p->p_ptevents = PTRACE_DEFAULT; 710 p->p_oppid = p->p_pptr->p_pid; 711 } 712 713 int 714 kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data) 715 { 716 struct iovec iov; 717 struct uio uio; 718 struct proc *curp, *p, *pp; 719 struct thread *td2 = NULL, *td3; 720 struct ptrace_io_desc *piod = NULL; 721 struct ptrace_lwpinfo *pl; 722 int error, num, tmp; 723 int proctree_locked = 0; 724 lwpid_t tid = 0, *buf; 725 #ifdef COMPAT_FREEBSD32 726 int wrap32 = 0, safe = 0; 727 struct ptrace_io_desc32 *piod32 = NULL; 728 struct ptrace_lwpinfo32 *pl32 = NULL; 729 struct ptrace_lwpinfo plr; 730 #endif 731 732 curp = td->td_proc; 733 734 /* Lock proctree before locking the process. */ 735 switch (req) { 736 case PT_TRACE_ME: 737 case PT_ATTACH: 738 case PT_STEP: 739 case PT_CONTINUE: 740 case PT_TO_SCE: 741 case PT_TO_SCX: 742 case PT_SYSCALL: 743 case PT_FOLLOW_FORK: 744 case PT_LWP_EVENTS: 745 case PT_GET_EVENT_MASK: 746 case PT_SET_EVENT_MASK: 747 case PT_DETACH: 748 case PT_GET_SC_ARGS: 749 sx_xlock(&proctree_lock); 750 proctree_locked = 1; 751 break; 752 default: 753 break; 754 } 755 756 if (req == PT_TRACE_ME) { 757 p = td->td_proc; 758 PROC_LOCK(p); 759 } else { 760 if (pid <= PID_MAX) { 761 if ((p = pfind(pid)) == NULL) { 762 if (proctree_locked) 763 sx_xunlock(&proctree_lock); 764 return (ESRCH); 765 } 766 } else { 767 td2 = tdfind(pid, -1); 768 if (td2 == NULL) { 769 if (proctree_locked) 770 sx_xunlock(&proctree_lock); 771 return (ESRCH); 772 } 773 p = td2->td_proc; 774 tid = pid; 775 pid = p->p_pid; 776 } 777 } 778 AUDIT_ARG_PROCESS(p); 779 780 if ((p->p_flag & P_WEXIT) != 0) { 781 error = ESRCH; 782 goto fail; 783 } 784 if ((error = p_cansee(td, p)) != 0) 785 goto fail; 786 787 if ((error = p_candebug(td, p)) != 0) 788 goto fail; 789 790 /* 791 * System processes can't be debugged. 792 */ 793 if ((p->p_flag & P_SYSTEM) != 0) { 794 error = EINVAL; 795 goto fail; 796 } 797 798 if (tid == 0) { 799 if ((p->p_flag & P_STOPPED_TRACE) != 0) { 800 KASSERT(p->p_xthread != NULL, ("NULL p_xthread")); 801 td2 = p->p_xthread; 802 } else { 803 td2 = FIRST_THREAD_IN_PROC(p); 804 } 805 tid = td2->td_tid; 806 } 807 808 #ifdef COMPAT_FREEBSD32 809 /* 810 * Test if we're a 32 bit client and what the target is. 811 * Set the wrap controls accordingly. 812 */ 813 if (SV_CURPROC_FLAG(SV_ILP32)) { 814 if (SV_PROC_FLAG(td2->td_proc, SV_ILP32)) 815 safe = 1; 816 wrap32 = 1; 817 } 818 #endif 819 /* 820 * Permissions check 821 */ 822 switch (req) { 823 case PT_TRACE_ME: 824 /* 825 * Always legal, when there is a parent process which 826 * could trace us. Otherwise, reject. 827 */ 828 if ((p->p_flag & P_TRACED) != 0) { 829 error = EBUSY; 830 goto fail; 831 } 832 if (p->p_pptr == initproc) { 833 error = EPERM; 834 goto fail; 835 } 836 break; 837 838 case PT_ATTACH: 839 /* Self */ 840 if (p == td->td_proc) { 841 error = EINVAL; 842 goto fail; 843 } 844 845 /* Already traced */ 846 if (p->p_flag & P_TRACED) { 847 error = EBUSY; 848 goto fail; 849 } 850 851 /* Can't trace an ancestor if you're being traced. */ 852 if (curp->p_flag & P_TRACED) { 853 for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) { 854 if (pp == p) { 855 error = EINVAL; 856 goto fail; 857 } 858 } 859 } 860 861 862 /* OK */ 863 break; 864 865 case PT_CLEARSTEP: 866 /* Allow thread to clear single step for itself */ 867 if (td->td_tid == tid) 868 break; 869 870 /* FALLTHROUGH */ 871 default: 872 /* not being traced... */ 873 if ((p->p_flag & P_TRACED) == 0) { 874 error = EPERM; 875 goto fail; 876 } 877 878 /* not being traced by YOU */ 879 if (p->p_pptr != td->td_proc) { 880 error = EBUSY; 881 goto fail; 882 } 883 884 /* not currently stopped */ 885 if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 || 886 p->p_suspcount != p->p_numthreads || 887 (p->p_flag & P_WAITED) == 0) { 888 error = EBUSY; 889 goto fail; 890 } 891 892 if ((p->p_flag & P_STOPPED_TRACE) == 0) { 893 static int count = 0; 894 if (count++ == 0) 895 printf("P_STOPPED_TRACE not set.\n"); 896 } 897 898 /* OK */ 899 break; 900 } 901 902 /* Keep this process around until we finish this request. */ 903 _PHOLD(p); 904 905 #ifdef FIX_SSTEP 906 /* 907 * Single step fixup ala procfs 908 */ 909 FIX_SSTEP(td2); 910 #endif 911 912 /* 913 * Actually do the requests 914 */ 915 916 td->td_retval[0] = 0; 917 918 switch (req) { 919 case PT_TRACE_ME: 920 /* set my trace flag and "owner" so it can read/write me */ 921 proc_set_traced(p, false); 922 if (p->p_flag & P_PPWAIT) 923 p->p_flag |= P_PPTRACE; 924 CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid); 925 break; 926 927 case PT_ATTACH: 928 /* security check done above */ 929 /* 930 * It would be nice if the tracing relationship was separate 931 * from the parent relationship but that would require 932 * another set of links in the proc struct or for "wait" 933 * to scan the entire proc table. To make life easier, 934 * we just re-parent the process we're trying to trace. 935 * The old parent is remembered so we can put things back 936 * on a "detach". 937 */ 938 proc_set_traced(p, true); 939 if (p->p_pptr != td->td_proc) { 940 proc_reparent(p, td->td_proc); 941 } 942 data = SIGSTOP; 943 CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid, 944 p->p_oppid); 945 goto sendsig; /* in PT_CONTINUE below */ 946 947 case PT_CLEARSTEP: 948 CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid, 949 p->p_pid); 950 error = ptrace_clear_single_step(td2); 951 break; 952 953 case PT_SETSTEP: 954 CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid, 955 p->p_pid); 956 error = ptrace_single_step(td2); 957 break; 958 959 case PT_SUSPEND: 960 CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid, 961 p->p_pid); 962 td2->td_dbgflags |= TDB_SUSPEND; 963 thread_lock(td2); 964 td2->td_flags |= TDF_NEEDSUSPCHK; 965 thread_unlock(td2); 966 break; 967 968 case PT_RESUME: 969 CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid, 970 p->p_pid); 971 td2->td_dbgflags &= ~TDB_SUSPEND; 972 break; 973 974 case PT_FOLLOW_FORK: 975 CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid, 976 p->p_ptevents & PTRACE_FORK ? "enabled" : "disabled", 977 data ? "enabled" : "disabled"); 978 if (data) 979 p->p_ptevents |= PTRACE_FORK; 980 else 981 p->p_ptevents &= ~PTRACE_FORK; 982 break; 983 984 case PT_LWP_EVENTS: 985 CTR3(KTR_PTRACE, "PT_LWP_EVENTS: pid %d %s -> %s", p->p_pid, 986 p->p_ptevents & PTRACE_LWP ? "enabled" : "disabled", 987 data ? "enabled" : "disabled"); 988 if (data) 989 p->p_ptevents |= PTRACE_LWP; 990 else 991 p->p_ptevents &= ~PTRACE_LWP; 992 break; 993 994 case PT_GET_EVENT_MASK: 995 if (data != sizeof(p->p_ptevents)) { 996 error = EINVAL; 997 break; 998 } 999 CTR2(KTR_PTRACE, "PT_GET_EVENT_MASK: pid %d mask %#x", p->p_pid, 1000 p->p_ptevents); 1001 *(int *)addr = p->p_ptevents; 1002 break; 1003 1004 case PT_SET_EVENT_MASK: 1005 if (data != sizeof(p->p_ptevents)) { 1006 error = EINVAL; 1007 break; 1008 } 1009 tmp = *(int *)addr; 1010 if ((tmp & ~(PTRACE_EXEC | PTRACE_SCE | PTRACE_SCX | 1011 PTRACE_FORK | PTRACE_LWP | PTRACE_VFORK)) != 0) { 1012 error = EINVAL; 1013 break; 1014 } 1015 CTR3(KTR_PTRACE, "PT_SET_EVENT_MASK: pid %d mask %#x -> %#x", 1016 p->p_pid, p->p_ptevents, tmp); 1017 p->p_ptevents = tmp; 1018 break; 1019 1020 case PT_GET_SC_ARGS: 1021 CTR1(KTR_PTRACE, "PT_GET_SC_ARGS: pid %d", p->p_pid); 1022 if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) == 0 1023 #ifdef COMPAT_FREEBSD32 1024 || (wrap32 && !safe) 1025 #endif 1026 ) { 1027 error = EINVAL; 1028 break; 1029 } 1030 bzero(addr, sizeof(td2->td_sa.args)); 1031 #ifdef COMPAT_FREEBSD32 1032 if (wrap32) 1033 for (num = 0; num < nitems(td2->td_sa.args); num++) 1034 ((uint32_t *)addr)[num] = (uint32_t) 1035 td2->td_sa.args[num]; 1036 else 1037 #endif 1038 bcopy(td2->td_sa.args, addr, td2->td_sa.narg * 1039 sizeof(register_t)); 1040 break; 1041 1042 case PT_STEP: 1043 case PT_CONTINUE: 1044 case PT_TO_SCE: 1045 case PT_TO_SCX: 1046 case PT_SYSCALL: 1047 case PT_DETACH: 1048 /* Zero means do not send any signal */ 1049 if (data < 0 || data > _SIG_MAXSIG) { 1050 error = EINVAL; 1051 break; 1052 } 1053 1054 switch (req) { 1055 case PT_STEP: 1056 CTR2(KTR_PTRACE, "PT_STEP: tid %d (pid %d)", 1057 td2->td_tid, p->p_pid); 1058 error = ptrace_single_step(td2); 1059 if (error) 1060 goto out; 1061 break; 1062 case PT_CONTINUE: 1063 case PT_TO_SCE: 1064 case PT_TO_SCX: 1065 case PT_SYSCALL: 1066 if (addr != (void *)1) { 1067 error = ptrace_set_pc(td2, 1068 (u_long)(uintfptr_t)addr); 1069 if (error) 1070 goto out; 1071 } 1072 switch (req) { 1073 case PT_TO_SCE: 1074 p->p_ptevents |= PTRACE_SCE; 1075 CTR4(KTR_PTRACE, 1076 "PT_TO_SCE: pid %d, events = %#x, PC = %#lx, sig = %d", 1077 p->p_pid, p->p_ptevents, 1078 (u_long)(uintfptr_t)addr, data); 1079 break; 1080 case PT_TO_SCX: 1081 p->p_ptevents |= PTRACE_SCX; 1082 CTR4(KTR_PTRACE, 1083 "PT_TO_SCX: pid %d, events = %#x, PC = %#lx, sig = %d", 1084 p->p_pid, p->p_ptevents, 1085 (u_long)(uintfptr_t)addr, data); 1086 break; 1087 case PT_SYSCALL: 1088 p->p_ptevents |= PTRACE_SYSCALL; 1089 CTR4(KTR_PTRACE, 1090 "PT_SYSCALL: pid %d, events = %#x, PC = %#lx, sig = %d", 1091 p->p_pid, p->p_ptevents, 1092 (u_long)(uintfptr_t)addr, data); 1093 break; 1094 case PT_CONTINUE: 1095 CTR3(KTR_PTRACE, 1096 "PT_CONTINUE: pid %d, PC = %#lx, sig = %d", 1097 p->p_pid, (u_long)(uintfptr_t)addr, data); 1098 break; 1099 } 1100 break; 1101 case PT_DETACH: 1102 /* 1103 * Reset the process parent. 1104 * 1105 * NB: This clears P_TRACED before reparenting 1106 * a detached process back to its original 1107 * parent. Otherwise the debugee will be set 1108 * as an orphan of the debugger. 1109 */ 1110 p->p_flag &= ~(P_TRACED | P_WAITED); 1111 if (p->p_oppid != p->p_pptr->p_pid) { 1112 PROC_LOCK(p->p_pptr); 1113 sigqueue_take(p->p_ksi); 1114 PROC_UNLOCK(p->p_pptr); 1115 1116 pp = proc_realparent(p); 1117 proc_reparent(p, pp); 1118 if (pp == initproc) 1119 p->p_sigparent = SIGCHLD; 1120 CTR3(KTR_PTRACE, 1121 "PT_DETACH: pid %d reparented to pid %d, sig %d", 1122 p->p_pid, pp->p_pid, data); 1123 } else 1124 CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d", 1125 p->p_pid, data); 1126 p->p_oppid = 0; 1127 p->p_ptevents = 0; 1128 FOREACH_THREAD_IN_PROC(p, td3) { 1129 if ((td3->td_dbgflags & TDB_FSTP) != 0) { 1130 sigqueue_delete(&td3->td_sigqueue, 1131 SIGSTOP); 1132 } 1133 td3->td_dbgflags &= ~(TDB_XSIG | TDB_FSTP); 1134 } 1135 if ((p->p_flag2 & P2_PTRACE_FSTP) != 0) { 1136 sigqueue_delete(&p->p_sigqueue, SIGSTOP); 1137 p->p_flag2 &= ~P2_PTRACE_FSTP; 1138 } 1139 1140 /* should we send SIGCHLD? */ 1141 /* childproc_continued(p); */ 1142 break; 1143 } 1144 1145 sendsig: 1146 if (proctree_locked) { 1147 sx_xunlock(&proctree_lock); 1148 proctree_locked = 0; 1149 } 1150 p->p_xsig = data; 1151 p->p_xthread = NULL; 1152 if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) { 1153 /* deliver or queue signal */ 1154 td2->td_dbgflags &= ~TDB_XSIG; 1155 td2->td_xsig = data; 1156 1157 /* 1158 * P_WKILLED is insurance that a PT_KILL/SIGKILL always 1159 * works immediately, even if another thread is 1160 * unsuspended first and attempts to handle a different 1161 * signal or if the POSIX.1b style signal queue cannot 1162 * accommodate any new signals. 1163 */ 1164 if (data == SIGKILL) 1165 p->p_flag |= P_WKILLED; 1166 1167 if (req == PT_DETACH) { 1168 FOREACH_THREAD_IN_PROC(p, td3) 1169 td3->td_dbgflags &= ~TDB_SUSPEND; 1170 } 1171 /* 1172 * unsuspend all threads, to not let a thread run, 1173 * you should use PT_SUSPEND to suspend it before 1174 * continuing process. 1175 */ 1176 PROC_SLOCK(p); 1177 p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED); 1178 thread_unsuspend(p); 1179 PROC_SUNLOCK(p); 1180 if (req == PT_ATTACH) 1181 kern_psignal(p, data); 1182 } else { 1183 if (data) 1184 kern_psignal(p, data); 1185 } 1186 break; 1187 1188 case PT_WRITE_I: 1189 case PT_WRITE_D: 1190 td2->td_dbgflags |= TDB_USERWR; 1191 PROC_UNLOCK(p); 1192 error = 0; 1193 if (proc_writemem(td, p, (off_t)(uintptr_t)addr, &data, 1194 sizeof(int)) != sizeof(int)) 1195 error = ENOMEM; 1196 else 1197 CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x", 1198 p->p_pid, addr, data); 1199 PROC_LOCK(p); 1200 break; 1201 1202 case PT_READ_I: 1203 case PT_READ_D: 1204 PROC_UNLOCK(p); 1205 error = tmp = 0; 1206 if (proc_readmem(td, p, (off_t)(uintptr_t)addr, &tmp, 1207 sizeof(int)) != sizeof(int)) 1208 error = ENOMEM; 1209 else 1210 CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x", 1211 p->p_pid, addr, tmp); 1212 td->td_retval[0] = tmp; 1213 PROC_LOCK(p); 1214 break; 1215 1216 case PT_IO: 1217 #ifdef COMPAT_FREEBSD32 1218 if (wrap32) { 1219 piod32 = addr; 1220 iov.iov_base = (void *)(uintptr_t)piod32->piod_addr; 1221 iov.iov_len = piod32->piod_len; 1222 uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs; 1223 uio.uio_resid = piod32->piod_len; 1224 } else 1225 #endif 1226 { 1227 piod = addr; 1228 iov.iov_base = piod->piod_addr; 1229 iov.iov_len = piod->piod_len; 1230 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs; 1231 uio.uio_resid = piod->piod_len; 1232 } 1233 uio.uio_iov = &iov; 1234 uio.uio_iovcnt = 1; 1235 uio.uio_segflg = UIO_USERSPACE; 1236 uio.uio_td = td; 1237 #ifdef COMPAT_FREEBSD32 1238 tmp = wrap32 ? piod32->piod_op : piod->piod_op; 1239 #else 1240 tmp = piod->piod_op; 1241 #endif 1242 switch (tmp) { 1243 case PIOD_READ_D: 1244 case PIOD_READ_I: 1245 CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)", 1246 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1247 uio.uio_rw = UIO_READ; 1248 break; 1249 case PIOD_WRITE_D: 1250 case PIOD_WRITE_I: 1251 CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)", 1252 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1253 td2->td_dbgflags |= TDB_USERWR; 1254 uio.uio_rw = UIO_WRITE; 1255 break; 1256 default: 1257 error = EINVAL; 1258 goto out; 1259 } 1260 PROC_UNLOCK(p); 1261 error = proc_rwmem(p, &uio); 1262 #ifdef COMPAT_FREEBSD32 1263 if (wrap32) 1264 piod32->piod_len -= uio.uio_resid; 1265 else 1266 #endif 1267 piod->piod_len -= uio.uio_resid; 1268 PROC_LOCK(p); 1269 break; 1270 1271 case PT_KILL: 1272 CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid); 1273 data = SIGKILL; 1274 goto sendsig; /* in PT_CONTINUE above */ 1275 1276 case PT_SETREGS: 1277 CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid, 1278 p->p_pid); 1279 td2->td_dbgflags |= TDB_USERWR; 1280 error = PROC_WRITE(regs, td2, addr); 1281 break; 1282 1283 case PT_GETREGS: 1284 CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid, 1285 p->p_pid); 1286 error = PROC_READ(regs, td2, addr); 1287 break; 1288 1289 case PT_SETFPREGS: 1290 CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid, 1291 p->p_pid); 1292 td2->td_dbgflags |= TDB_USERWR; 1293 error = PROC_WRITE(fpregs, td2, addr); 1294 break; 1295 1296 case PT_GETFPREGS: 1297 CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid, 1298 p->p_pid); 1299 error = PROC_READ(fpregs, td2, addr); 1300 break; 1301 1302 case PT_SETDBREGS: 1303 CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid, 1304 p->p_pid); 1305 td2->td_dbgflags |= TDB_USERWR; 1306 error = PROC_WRITE(dbregs, td2, addr); 1307 break; 1308 1309 case PT_GETDBREGS: 1310 CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid, 1311 p->p_pid); 1312 error = PROC_READ(dbregs, td2, addr); 1313 break; 1314 1315 case PT_LWPINFO: 1316 if (data <= 0 || 1317 #ifdef COMPAT_FREEBSD32 1318 (!wrap32 && data > sizeof(*pl)) || 1319 (wrap32 && data > sizeof(*pl32))) { 1320 #else 1321 data > sizeof(*pl)) { 1322 #endif 1323 error = EINVAL; 1324 break; 1325 } 1326 #ifdef COMPAT_FREEBSD32 1327 if (wrap32) { 1328 pl = &plr; 1329 pl32 = addr; 1330 } else 1331 #endif 1332 pl = addr; 1333 pl->pl_lwpid = td2->td_tid; 1334 pl->pl_event = PL_EVENT_NONE; 1335 pl->pl_flags = 0; 1336 if (td2->td_dbgflags & TDB_XSIG) { 1337 pl->pl_event = PL_EVENT_SIGNAL; 1338 if (td2->td_si.si_signo != 0 && 1339 #ifdef COMPAT_FREEBSD32 1340 ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo, 1341 pl_siginfo) + sizeof(pl->pl_siginfo)) || 1342 (wrap32 && data >= offsetof(struct ptrace_lwpinfo32, 1343 pl_siginfo) + sizeof(struct siginfo32))) 1344 #else 1345 data >= offsetof(struct ptrace_lwpinfo, pl_siginfo) 1346 + sizeof(pl->pl_siginfo) 1347 #endif 1348 ){ 1349 pl->pl_flags |= PL_FLAG_SI; 1350 pl->pl_siginfo = td2->td_si; 1351 } 1352 } 1353 if ((pl->pl_flags & PL_FLAG_SI) == 0) 1354 bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo)); 1355 if (td2->td_dbgflags & TDB_SCE) 1356 pl->pl_flags |= PL_FLAG_SCE; 1357 else if (td2->td_dbgflags & TDB_SCX) 1358 pl->pl_flags |= PL_FLAG_SCX; 1359 if (td2->td_dbgflags & TDB_EXEC) 1360 pl->pl_flags |= PL_FLAG_EXEC; 1361 if (td2->td_dbgflags & TDB_FORK) { 1362 pl->pl_flags |= PL_FLAG_FORKED; 1363 pl->pl_child_pid = td2->td_dbg_forked; 1364 if (td2->td_dbgflags & TDB_VFORK) 1365 pl->pl_flags |= PL_FLAG_VFORKED; 1366 } else if ((td2->td_dbgflags & (TDB_SCX | TDB_VFORK)) == 1367 TDB_VFORK) 1368 pl->pl_flags |= PL_FLAG_VFORK_DONE; 1369 if (td2->td_dbgflags & TDB_CHILD) 1370 pl->pl_flags |= PL_FLAG_CHILD; 1371 if (td2->td_dbgflags & TDB_BORN) 1372 pl->pl_flags |= PL_FLAG_BORN; 1373 if (td2->td_dbgflags & TDB_EXIT) 1374 pl->pl_flags |= PL_FLAG_EXITED; 1375 pl->pl_sigmask = td2->td_sigmask; 1376 pl->pl_siglist = td2->td_siglist; 1377 strcpy(pl->pl_tdname, td2->td_name); 1378 if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) { 1379 pl->pl_syscall_code = td2->td_sa.code; 1380 pl->pl_syscall_narg = td2->td_sa.narg; 1381 } else { 1382 pl->pl_syscall_code = 0; 1383 pl->pl_syscall_narg = 0; 1384 } 1385 #ifdef COMPAT_FREEBSD32 1386 if (wrap32) 1387 ptrace_lwpinfo_to32(pl, pl32); 1388 #endif 1389 CTR6(KTR_PTRACE, 1390 "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d", 1391 td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags, 1392 pl->pl_child_pid, pl->pl_syscall_code); 1393 break; 1394 1395 case PT_GETNUMLWPS: 1396 CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid, 1397 p->p_numthreads); 1398 td->td_retval[0] = p->p_numthreads; 1399 break; 1400 1401 case PT_GETLWPLIST: 1402 CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d", 1403 p->p_pid, data, p->p_numthreads); 1404 if (data <= 0) { 1405 error = EINVAL; 1406 break; 1407 } 1408 num = imin(p->p_numthreads, data); 1409 PROC_UNLOCK(p); 1410 buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK); 1411 tmp = 0; 1412 PROC_LOCK(p); 1413 FOREACH_THREAD_IN_PROC(p, td2) { 1414 if (tmp >= num) 1415 break; 1416 buf[tmp++] = td2->td_tid; 1417 } 1418 PROC_UNLOCK(p); 1419 error = copyout(buf, addr, tmp * sizeof(lwpid_t)); 1420 free(buf, M_TEMP); 1421 if (!error) 1422 td->td_retval[0] = tmp; 1423 PROC_LOCK(p); 1424 break; 1425 1426 case PT_VM_TIMESTAMP: 1427 CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d", 1428 p->p_pid, p->p_vmspace->vm_map.timestamp); 1429 td->td_retval[0] = p->p_vmspace->vm_map.timestamp; 1430 break; 1431 1432 case PT_VM_ENTRY: 1433 PROC_UNLOCK(p); 1434 #ifdef COMPAT_FREEBSD32 1435 if (wrap32) 1436 error = ptrace_vm_entry32(td, p, addr); 1437 else 1438 #endif 1439 error = ptrace_vm_entry(td, p, addr); 1440 PROC_LOCK(p); 1441 break; 1442 1443 default: 1444 #ifdef __HAVE_PTRACE_MACHDEP 1445 if (req >= PT_FIRSTMACH) { 1446 PROC_UNLOCK(p); 1447 error = cpu_ptrace(td2, req, addr, data); 1448 PROC_LOCK(p); 1449 } else 1450 #endif 1451 /* Unknown request. */ 1452 error = EINVAL; 1453 break; 1454 } 1455 1456 out: 1457 /* Drop our hold on this process now that the request has completed. */ 1458 _PRELE(p); 1459 fail: 1460 PROC_UNLOCK(p); 1461 if (proctree_locked) 1462 sx_xunlock(&proctree_lock); 1463 return (error); 1464 } 1465 #undef PROC_READ 1466 #undef PROC_WRITE 1467 1468 /* 1469 * Stop a process because of a debugging event; 1470 * stay stopped until p->p_step is cleared 1471 * (cleared by PIOCCONT in procfs). 1472 */ 1473 void 1474 stopevent(struct proc *p, unsigned int event, unsigned int val) 1475 { 1476 1477 PROC_LOCK_ASSERT(p, MA_OWNED); 1478 p->p_step = 1; 1479 CTR3(KTR_PTRACE, "stopevent: pid %d event %u val %u", p->p_pid, event, 1480 val); 1481 do { 1482 if (event != S_EXIT) 1483 p->p_xsig = val; 1484 p->p_xthread = NULL; 1485 p->p_stype = event; /* Which event caused the stop? */ 1486 wakeup(&p->p_stype); /* Wake up any PIOCWAIT'ing procs */ 1487 msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0); 1488 } while (p->p_step); 1489 } 1490