/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/pioctl.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#include <compat/freebsd32/freebsd32_signal.h>

struct ptrace_io_desc32 {
	int		piod_op;
	uint32_t	piod_offs;
	uint32_t	piod_addr;
	uint32_t	piod_len;
};

struct ptrace_vm_entry32 {
	int		pve_entry;
	int		pve_timestamp;
	uint32_t	pve_start;
	uint32_t	pve_end;
	uint32_t	pve_offset;
	u_int		pve_prot;
	u_int		pve_pathlen;
	int32_t		pve_fileid;
	u_int		pve_fsid;
	uint32_t	pve_path;
};
#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while (0)

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, page_offset, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	PROC_ASSERT_HELD(p);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	/*
	 * If we are writing, then we request vm_fault() to create a private
	 * copy of each page.  Since these copies will not be writeable by the
	 * process, we must explicitly request that they be dirtied.
	 */
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		u_int len;
		vm_page_t m;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault and hold the page on behalf of the process.
		 */
		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
		if (error != KERN_SUCCESS) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (writing && error == 0) {
			vm_map_lock_read(map);
			if (vm_map_check_protection(map, pageno, pageno +
			    PAGE_SIZE, VM_PROT_EXECUTE))
				vm_sync_icache(map, uva, len);
			vm_map_unlock_read(map);
		}

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

static ssize_t
proc_iop(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len, enum uio_rw rw)
{
	struct iovec iov;
	struct uio uio;
	ssize_t slen;

	MPASS(len < SSIZE_MAX);
	slen = (ssize_t)len;

	iov.iov_base = (caddr_t)buf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = va;
	uio.uio_resid = slen;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = rw;
	uio.uio_td = td;
	proc_rwmem(p, &uio);
	if (uio.uio_resid == slen)
		return (-1);
	return (slen - uio.uio_resid);
}

ssize_t
proc_readmem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len)
{

	return (proc_iop(td, p, va, buf, len, UIO_READ));
}

ssize_t
proc_writemem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len)
{

	return (proc_iop(td, p, va, buf, len, UIO_WRITE));
}

static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		entry = map->header.next;
		index = 0;
		while (index < pve->pve_entry && entry != &map->header) {
			entry = entry->next;
			index++;
		}
		if (index != pve->pve_entry) {
			error = EINVAL;
			break;
		}
		KASSERT((map->header.eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
		    ("Submap in map header"));
		while ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
			entry = entry->next;
			index++;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_RLOCK(obj);
	} while (0);

	vm_map_unlock_read(map);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_RLOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = vm_object_vnode(lobj);
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_RUNLOCK(lobj);
		VM_OBJECT_RUNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			vn_fullpath(td, vp, &fullpath, &freepath);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}
	vmspace_free(vm);
	if (error == 0)
		CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p",
		    p->p_pid, pve->pve_entry, pve->pve_start);

	return (error);
}

#ifdef COMPAT_FREEBSD32
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}

static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

	bzero(pl32, sizeof(*pl32));
	pl32->pl_lwpid = pl->pl_lwpid;
	pl32->pl_event = pl->pl_event;
	pl32->pl_flags = pl->pl_flags;
	pl32->pl_sigmask = pl->pl_sigmask;
	pl32->pl_siglist = pl->pl_siglist;
	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
	strcpy(pl32->pl_tdname, pl->pl_tdname);
	pl32->pl_child_pid = pl->pl_child_pid;
	pl32->pl_syscall_code = pl->pl_syscall_code;
	pl32->pl_syscall_narg = pl->pl_syscall_narg;
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	BZERO(a, s)		wrap32 ? \
	bzero(a ## 32, s ## 32) : \
	bzero(a, s)
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	BZERO(a, s)		bzero(a, s)
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_lwpinfo32 pl32;
		struct ptrace_vm_entry32 pve32;
#endif
		char args[sizeof(td->td_sa.args)];
		int ptevents;
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GET_EVENT_MASK:
	case PT_LWPINFO:
	case PT_GET_SC_ARGS:
		break;
	case PT_GETREGS:
		BZERO(&r.reg, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		BZERO(&r.fpreg, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		BZERO(&r.dbreg, sizeof r.dbreg);
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_SET_EVENT_MASK:
		if (uap->data != sizeof(r.ptevents))
			error = EINVAL;
		else
			error = copyin(uap->addr, &r.ptevents, uap->data);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_GET_EVENT_MASK:
		/* NB: The size in uap->data is validated in kern_ptrace(). */
		error = copyout(&r.ptevents, uap->addr, uap->data);
		break;
	case PT_LWPINFO:
		/* NB: The size in uap->data is validated in kern_ptrace(). */
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	case PT_GET_SC_ARGS:
		error = copyout(r.args, uap->addr, MIN(uap->data,
		    sizeof(r.args)));
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT
#undef BZERO

#ifdef COMPAT_FREEBSD32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

void
proc_set_traced(struct proc *p, bool stop)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag |= P_TRACED;
	if (stop)
		p->p_flag2 |= P2_PTRACE_FSTP;
	p->p_ptevents = PTRACE_DEFAULT;
}

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL, *td3;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, num, tmp;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
	struct ptrace_lwpinfo32 *pl32 = NULL;
	struct ptrace_lwpinfo plr;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_FOLLOW_FORK:
	case PT_LWP_EVENTS:
	case PT_GET_EVENT_MASK:
	case PT_SET_EVENT_MASK:
	case PT_DETACH:
	case PT_GET_SC_ARGS:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			td2 = tdfind(pid, -1);
			if (td2 == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			p = td2->td_proc;
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/*
		 * Always legal, when there is a parent process which
		 * could trace us.  Otherwise, reject.
		 */
		if ((p->p_flag & P_TRACED) != 0) {
			error = EBUSY;
			goto fail;
		}
		if (p->p_pptr == initproc) {
			error = EPERM;
			goto fail;
		}
		break;

	case PT_ATTACH:
		/* Self */
		if (p == td->td_proc) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & P_STOPPED_TRACE) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		proc_set_traced(p, false);
		if (p->p_flag & P_PPWAIT)
			p->p_flag |= P_PPTRACE;
		CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid);
		break;

	case PT_ATTACH:
		/* security check done above */
		/*
		 * It would be nice if the tracing relationship was separate
		 * from the parent relationship but that would require
		 * another set of links in the proc struct or for "wait"
		 * to scan the entire proc table.  To make life easier,
		 * we just re-parent the process we're trying to trace.
		 * The old parent is remembered so we can put things back
		 * on a "detach".
		 */
		proc_set_traced(p, true);
		if (p->p_pptr != td->td_proc) {
			proc_reparent(p, td->td_proc, false);
		}
		CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid,
		    p->p_oppid);

		sx_xunlock(&proctree_lock);
		proctree_locked = 0;
		MPASS(p->p_xthread == NULL);
		MPASS((p->p_flag & P_STOPPED_TRACE) == 0);

		/*
		 * If already stopped due to a stop signal, clear the
		 * existing stop before triggering a traced SIGSTOP.
		 */
		if ((p->p_flag & P_STOPPED_SIG) != 0) {
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_SIG | P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		}

		kern_psignal(p, SIGSTOP);
		break;

	case PT_CLEARSTEP:
		CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_FOLLOW_FORK:
		CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid,
		    p->p_ptevents & PTRACE_FORK ? "enabled" : "disabled",
		    data ? "enabled" : "disabled");
		if (data)
			p->p_ptevents |= PTRACE_FORK;
		else
			p->p_ptevents &= ~PTRACE_FORK;
		break;

	case PT_LWP_EVENTS:
		CTR3(KTR_PTRACE, "PT_LWP_EVENTS: pid %d %s -> %s", p->p_pid,
		    p->p_ptevents & PTRACE_LWP ? "enabled" : "disabled",
		    data ? "enabled" : "disabled");
		if (data)
			p->p_ptevents |= PTRACE_LWP;
		else
			p->p_ptevents &= ~PTRACE_LWP;
		break;

	case PT_GET_EVENT_MASK:
		if (data != sizeof(p->p_ptevents)) {
			error = EINVAL;
			break;
		}
		CTR2(KTR_PTRACE, "PT_GET_EVENT_MASK: pid %d mask %#x", p->p_pid,
		    p->p_ptevents);
		*(int *)addr = p->p_ptevents;
		break;

	case PT_SET_EVENT_MASK:
		if (data != sizeof(p->p_ptevents)) {
			error = EINVAL;
			break;
		}
		tmp = *(int *)addr;
		if ((tmp & ~(PTRACE_EXEC | PTRACE_SCE | PTRACE_SCX |
		    PTRACE_FORK | PTRACE_LWP | PTRACE_VFORK)) != 0) {
			error = EINVAL;
			break;
		}
		CTR3(KTR_PTRACE, "PT_SET_EVENT_MASK: pid %d mask %#x -> %#x",
		    p->p_pid, p->p_ptevents, tmp);
		p->p_ptevents = tmp;
		break;

	case PT_GET_SC_ARGS:
		CTR1(KTR_PTRACE, "PT_GET_SC_ARGS: pid %d", p->p_pid);
		if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) == 0
#ifdef COMPAT_FREEBSD32
		    || (wrap32 && !safe)
#endif
		    ) {
			error = EINVAL;
			break;
		}
		bzero(addr, sizeof(td2->td_sa.args));
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			for (num = 0; num < nitems(td2->td_sa.args); num++)
				((uint32_t *)addr)[num] = (uint32_t)
				    td2->td_sa.args[num];
		else
#endif
			bcopy(td2->td_sa.args, addr, td2->td_sa.narg *
			    sizeof(register_t));
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			CTR3(KTR_PTRACE, "PT_STEP: tid %d (pid %d), sig = %d",
			    td2->td_tid, p->p_pid, data);
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_ptevents |= PTRACE_SCE;
				CTR4(KTR_PTRACE,
		    "PT_TO_SCE: pid %d, events = %#x, PC = %#lx, sig = %d",
				    p->p_pid, p->p_ptevents,
				    (u_long)(uintfptr_t)addr, data);
				break;
			case PT_TO_SCX:
				p->p_ptevents |= PTRACE_SCX;
				CTR4(KTR_PTRACE,
		    "PT_TO_SCX: pid %d, events = %#x, PC = %#lx, sig = %d",
				    p->p_pid, p->p_ptevents,
				    (u_long)(uintfptr_t)addr, data);
				break;
			case PT_SYSCALL:
				p->p_ptevents |= PTRACE_SYSCALL;
				CTR4(KTR_PTRACE,
		    "PT_SYSCALL: pid %d, events = %#x, PC = %#lx, sig = %d",
				    p->p_pid, p->p_ptevents,
				    (u_long)(uintfptr_t)addr, data);
				break;
			case PT_CONTINUE:
				CTR3(KTR_PTRACE,
				    "PT_CONTINUE: pid %d, PC = %#lx, sig = %d",
				    p->p_pid, (u_long)(uintfptr_t)addr, data);
				break;
			}
			break;
		case PT_DETACH:
			/*
			 * Reset the process parent.
			 *
			 * NB: This clears P_TRACED before reparenting
			 * a detached process back to its original
			 * parent.  Otherwise the debuggee will be set
			 * as an orphan of the debugger.
			 */
			p->p_flag &= ~(P_TRACED | P_WAITED);
			if (p->p_oppid != p->p_pptr->p_pid) {
				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				pp = proc_realparent(p);
				proc_reparent(p, pp, false);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
				CTR3(KTR_PTRACE,
			    "PT_DETACH: pid %d reparented to pid %d, sig %d",
				    p->p_pid, pp->p_pid, data);
			} else
				CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d",
				    p->p_pid, data);
			p->p_ptevents = 0;
			FOREACH_THREAD_IN_PROC(p, td3) {
				if ((td3->td_dbgflags & TDB_FSTP) != 0) {
					sigqueue_delete(&td3->td_sigqueue,
					    SIGSTOP);
				}
				td3->td_dbgflags &= ~(TDB_XSIG | TDB_FSTP |
				    TDB_SUSPEND);
			}

			if ((p->p_flag2 & P2_PTRACE_FSTP) != 0) {
				sigqueue_delete(&p->p_sigqueue, SIGSTOP);
				p->p_flag2 &= ~P2_PTRACE_FSTP;
			}

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

		sx_xunlock(&proctree_lock);
		proctree_locked = 0;

	sendsig:
		MPASS(proctree_locked == 0);

		/*
		 * Clear the pending event for the thread that just
		 * reported its event (p_xthread).  This may not be
		 * the thread passed to PT_CONTINUE, PT_STEP, etc. if
		 * the debugger is resuming a different thread.
		 *
		 * Deliver any pending signal via the reporting thread.
		 */
		MPASS(p->p_xthread != NULL);
		p->p_xthread->td_dbgflags &= ~TDB_XSIG;
		p->p_xthread->td_xsig = data;
		p->p_xthread = NULL;
		p->p_xsig = data;

		/*
		 * P_WKILLED is insurance that a PT_KILL/SIGKILL
		 * always works immediately, even if another thread is
		 * unsuspended first and attempts to handle a
		 * different signal or if the POSIX.1b style signal
		 * queue cannot accommodate any new signals.
		 */
		if (data == SIGKILL)
			proc_wkilled(p);

		/*
		 * Unsuspend all threads.  To leave a thread
		 * suspended, use PT_SUSPEND to suspend it before
		 * continuing the process.
		 */
		PROC_SLOCK(p);
		p->p_flag &= ~(P_STOPPED_TRACE | P_STOPPED_SIG | P_WAITED);
		thread_unsuspend(p);
		PROC_SUNLOCK(p);
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		PROC_UNLOCK(p);
		error = 0;
		if (proc_writemem(td, p, (off_t)(uintptr_t)addr, &data,
		    sizeof(int)) != sizeof(int))
			error = ENOMEM;
		else
			CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x",
			    p->p_pid, addr, data);
		PROC_LOCK(p);
		break;

	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		error = tmp = 0;
		if (proc_readmem(td, p, (off_t)(uintptr_t)addr, &tmp,
		    sizeof(int)) != sizeof(int))
			error = ENOMEM;
		else
			CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x",
			    p->p_pid, addr, tmp);
		td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)",
			    p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)",
			    p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid);
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
		    (!wrap32 && data > sizeof(*pl)) ||
		    (wrap32 && data > sizeof(*pl32))) {
#else
		    data > sizeof(*pl)) {
#endif
			error = EINVAL;
			break;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			pl = &plr;
			pl32 = addr;
		} else
#endif
			pl = addr;
		bzero(pl, sizeof(*pl));
		pl->pl_lwpid = td2->td_tid;
		pl->pl_event = PL_EVENT_NONE;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			if (td2->td_si.si_signo != 0 &&
#ifdef COMPAT_FREEBSD32
			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
			    pl_siginfo) + sizeof(struct siginfo32)))
#else
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)
#endif
			    ) {
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_si;
			}
		}
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		if (td2->td_dbgflags & TDB_FORK) {
			pl->pl_flags |= PL_FLAG_FORKED;
			pl->pl_child_pid = td2->td_dbg_forked;
			if (td2->td_dbgflags & TDB_VFORK)
				pl->pl_flags |= PL_FLAG_VFORKED;
		} else if ((td2->td_dbgflags & (TDB_SCX | TDB_VFORK)) ==
		    TDB_VFORK)
			pl->pl_flags |= PL_FLAG_VFORK_DONE;
		if (td2->td_dbgflags & TDB_CHILD)
			pl->pl_flags |= PL_FLAG_CHILD;
		if (td2->td_dbgflags & TDB_BORN)
			pl->pl_flags |= PL_FLAG_BORN;
		if (td2->td_dbgflags & TDB_EXIT)
			pl->pl_flags |= PL_FLAG_EXITED;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		strcpy(pl->pl_tdname, td2->td_name);
		if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) {
			pl->pl_syscall_code = td2->td_sa.code;
			pl->pl_syscall_narg = td2->td_sa.narg;
		} else {
			pl->pl_syscall_code = 0;
			pl->pl_syscall_narg = 0;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			ptrace_lwpinfo_to32(pl, pl32);
#endif
		CTR6(KTR_PTRACE,
	    "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d",
		    td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags,
		    pl->pl_child_pid, pl->pl_syscall_code);
		break;

	case PT_GETNUMLWPS:
		CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid,
		    p->p_numthreads);
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d",
		    p->p_pid, data, p->p_numthreads);
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d",
		    p->p_pid, p->p_vmspace->vm_map.timestamp);
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

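	/*
	 * A minimal userland sketch (illustrative only, not compiled here)
	 * of how a tracer might walk a traced process's address space with
	 * PT_VM_ENTRY, using the pve_entry cursor that ptrace_vm_entry()
	 * above advances on each successful call; "pathbuf" and its size
	 * are assumptions for the example:
	 *
	 *	struct ptrace_vm_entry pve;
	 *	char pathbuf[PATH_MAX];
	 *
	 *	memset(&pve, 0, sizeof(pve));
	 *	pve.pve_entry = 0;
	 *	for (;;) {
	 *		pve.pve_path = pathbuf;
	 *		pve.pve_pathlen = sizeof(pathbuf);
	 *		if (ptrace(PT_VM_ENTRY, pid, (caddr_t)&pve, 0) == -1)
	 *			break;	// ENOENT once past the last entry
	 *		// pve.pve_start/pve_end/pve_prot describe the mapping;
	 *		// pve.pve_entry was advanced for the next iteration.
	 *	}
	 */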
	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
			error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	CTR3(KTR_PTRACE, "stopevent: pid %d event %u val %u", p->p_pid, event,
	    val);
	do {
		if (event != S_EXIT)
			p->p_xsig = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
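
/*
 * For reference, an illustrative (untested) userland sketch of the
 * attach/stop/resume cycle served by kern_ptrace() above; error handling
 * is omitted and "pid" is assumed to name an existing, debuggable process:
 *
 *	int status;
 *
 *	ptrace(PT_ATTACH, pid, NULL, 0);	// reparent and send SIGSTOP
 *	waitpid(pid, &status, 0);		// wait for the traced stop
 *	...					// PT_IO, PT_GETREGS, etc.
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0); // resume at current PC, no signal
 *	...
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);	// undo the trace relationship
 */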