/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/pioctl.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#include <compat/freebsd32/freebsd32_signal.h>

struct ptrace_io_desc32 {
        int             piod_op;
        uint32_t        piod_offs;
        uint32_t        piod_addr;
        uint32_t        piod_len;
};

struct ptrace_sc_ret32 {
        uint32_t        sr_retval[2];
        int             sr_error;
};

struct ptrace_vm_entry32 {
        int             pve_entry;
        int             pve_timestamp;
        uint32_t        pve_start;
        uint32_t        pve_end;
        uint32_t        pve_offset;
        u_int           pve_prot;
        u_int           pve_pathlen;
        int32_t         pve_fileid;
        u_int           pve_fsid;
        uint32_t        pve_path;
};
#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *      Get the current user-visible register set from the process
 *      and copy it into the regs structure (<machine/reg.h>).
 *      The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *      Update the current register set from the passed in regs
 *      structure. Take care to avoid clobbering special CPU
 *      registers or privileged bits in the PSL.
 *      Depending on the architecture this may have fix-up work to do,
 *      especially if the IAR or PCW are modified.
 *      The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *      deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *      deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *      Arrange for the process to trap after executing a single instruction.
 */

#define PROC_ACTION(action) do {                                \
        int error;                                              \
                                                                \
        PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);                \
        if ((td->td_proc->p_flag & P_INMEM) == 0)               \
                error = EIO;                                    \
        else                                                    \
                error = (action);                               \
        return (error);                                         \
} while (0)
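
/*
 * Illustrative expansion (comment only, not compiled): with the process lock
 * held, PROC_ACTION(fill_regs(td, regs)) in proc_read_regs() below behaves
 * roughly like this, failing with EIO when the target process is swapped out:
 *
 *      int error;
 *
 *      PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 *      if ((td->td_proc->p_flag & P_INMEM) == 0)
 *              error = EIO;
 *      else
 *              error = fill_regs(td, regs);
 *      return (error);
 */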

int
proc_read_regs(struct thread *td, struct reg *regs)
{

        PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

        PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

        PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

        PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

        PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

        PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

        PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

        PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

        PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

        PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

        PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

        PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

        PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
        vm_map_t map;
        vm_offset_t pageno;             /* page number */
        vm_prot_t reqprot;
        int error, fault_flags, page_offset, writing;

        /*
         * Assert that someone has locked this vmspace. (Should be
         * curthread but we can't assert that.) This keeps the process
         * from exiting out from under us until this operation completes.
         */
        PROC_ASSERT_HELD(p);
        PROC_LOCK_ASSERT(p, MA_NOTOWNED);

        /*
         * The map we want...
         */
        map = &p->p_vmspace->vm_map;

        /*
         * If we are writing, then we request vm_fault() to create a private
         * copy of each page. Since these copies will not be writeable by the
         * process, we must explicitly request that they be dirtied.
         */
        writing = uio->uio_rw == UIO_WRITE;
        reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
        fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

        /*
         * Only map in one page at a time. We don't have to, but it
         * makes things easier. This way is trivial - right?
         */
        do {
                vm_offset_t uva;
                u_int len;
                vm_page_t m;

                uva = (vm_offset_t)uio->uio_offset;

                /*
                 * Get the page number of this segment.
                 */
                pageno = trunc_page(uva);
                page_offset = uva - pageno;

                /*
                 * How many bytes to copy
                 */
                len = min(PAGE_SIZE - page_offset, uio->uio_resid);

                /*
                 * Fault and hold the page on behalf of the process.
                 */
                error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
                if (error != KERN_SUCCESS) {
                        if (error == KERN_RESOURCE_SHORTAGE)
                                error = ENOMEM;
                        else
                                error = EFAULT;
                        break;
                }

                /*
                 * Now do the i/o move.
                 */
                error = uiomove_fromphys(&m, page_offset, len, uio);

                /* Make the I-cache coherent for breakpoints. */
                if (writing && error == 0) {
                        vm_map_lock_read(map);
                        if (vm_map_check_protection(map, pageno, pageno +
                            PAGE_SIZE, VM_PROT_EXECUTE))
                                vm_sync_icache(map, uva, len);
                        vm_map_unlock_read(map);
                }

                /*
                 * Release the page.
                 */
                vm_page_lock(m);
                if (vm_page_unwire(m, PQ_ACTIVE) && m->object == NULL)
                        vm_page_free(m);
                vm_page_unlock(m);

        } while (error == 0 && uio->uio_resid > 0);

        return (error);
}

static ssize_t
proc_iop(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len, enum uio_rw rw)
{
        struct iovec iov;
        struct uio uio;
        ssize_t slen;

        MPASS(len < SSIZE_MAX);
        slen = (ssize_t)len;

        iov.iov_base = (caddr_t)buf;
        iov.iov_len = len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = va;
        uio.uio_resid = slen;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = rw;
        uio.uio_td = td;
        proc_rwmem(p, &uio);
        if (uio.uio_resid == slen)
                return (-1);
        return (slen - uio.uio_resid);
}

ssize_t
proc_readmem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len)
{

        return (proc_iop(td, p, va, buf, len, UIO_READ));
}

ssize_t
proc_writemem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len)
{

        return (proc_iop(td, p, va, buf, len, UIO_WRITE));
}
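
/*
 * Minimal usage sketch (comment only): callers such as the PT_READ_I and
 * PT_WRITE_D handlers below hold the target with _PHOLD() and drop the
 * process lock before calling these helpers, e.g. to read one word:
 *
 *      int word;
 *
 *      if (proc_readmem(td, p, va, &word, sizeof(word)) != sizeof(word))
 *              error = ENOMEM;         (short or failed transfer)
 */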

static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
        struct vattr vattr;
        vm_map_t map;
        vm_map_entry_t entry;
        vm_object_t obj, tobj, lobj;
        struct vmspace *vm;
        struct vnode *vp;
        char *freepath, *fullpath;
        u_int pathlen;
        int error, index;

        error = 0;
        obj = NULL;

        vm = vmspace_acquire_ref(p);
        map = &vm->vm_map;
        vm_map_lock_read(map);

        do {
                entry = map->header.next;
                index = 0;
                while (index < pve->pve_entry && entry != &map->header) {
                        entry = entry->next;
                        index++;
                }
                if (index != pve->pve_entry) {
                        error = EINVAL;
                        break;
                }
                KASSERT((map->header.eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
                    ("Submap in map header"));
                while ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
                        entry = entry->next;
                        index++;
                }
                if (entry == &map->header) {
                        error = ENOENT;
                        break;
                }

                /* We got an entry. */
                pve->pve_entry = index + 1;
                pve->pve_timestamp = map->timestamp;
                pve->pve_start = entry->start;
                pve->pve_end = entry->end - 1;
                pve->pve_offset = entry->offset;
                pve->pve_prot = entry->protection;

                /* Backing object's path needed? */
                if (pve->pve_pathlen == 0)
                        break;

                pathlen = pve->pve_pathlen;
                pve->pve_pathlen = 0;

                obj = entry->object.vm_object;
                if (obj != NULL)
                        VM_OBJECT_RLOCK(obj);
        } while (0);

        vm_map_unlock_read(map);

        pve->pve_fsid = VNOVAL;
        pve->pve_fileid = VNOVAL;

        if (error == 0 && obj != NULL) {
                lobj = obj;
                for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
                        if (tobj != obj)
                                VM_OBJECT_RLOCK(tobj);
                        if (lobj != obj)
                                VM_OBJECT_RUNLOCK(lobj);
                        lobj = tobj;
                        pve->pve_offset += tobj->backing_object_offset;
                }
                vp = vm_object_vnode(lobj);
                if (vp != NULL)
                        vref(vp);
                if (lobj != obj)
                        VM_OBJECT_RUNLOCK(lobj);
                VM_OBJECT_RUNLOCK(obj);

                if (vp != NULL) {
                        freepath = NULL;
                        fullpath = NULL;
                        vn_fullpath(td, vp, &fullpath, &freepath);
                        vn_lock(vp, LK_SHARED | LK_RETRY);
                        if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
                                pve->pve_fileid = vattr.va_fileid;
                                pve->pve_fsid = vattr.va_fsid;
                        }
                        vput(vp);

                        if (fullpath != NULL) {
                                pve->pve_pathlen = strlen(fullpath) + 1;
                                if (pve->pve_pathlen <= pathlen) {
                                        error = copyout(fullpath, pve->pve_path,
                                            pve->pve_pathlen);
                                } else
                                        error = ENAMETOOLONG;
                        }
                        if (freepath != NULL)
                                free(freepath, M_TEMP);
                }
        }
        vmspace_free(vm);
        if (error == 0)
                CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p",
                    p->p_pid, pve->pve_entry, pve->pve_start);

        return (error);
}

#ifdef COMPAT_FREEBSD32
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
        struct ptrace_vm_entry pve;
        int error;

        pve.pve_entry = pve32->pve_entry;
        pve.pve_pathlen = pve32->pve_pathlen;
        pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

        error = ptrace_vm_entry(td, p, &pve);
        if (error == 0) {
                pve32->pve_entry = pve.pve_entry;
                pve32->pve_timestamp = pve.pve_timestamp;
                pve32->pve_start = pve.pve_start;
                pve32->pve_end = pve.pve_end;
                pve32->pve_offset = pve.pve_offset;
                pve32->pve_prot = pve.pve_prot;
                pve32->pve_fileid = pve.pve_fileid;
                pve32->pve_fsid = pve.pve_fsid;
        }

        pve32->pve_pathlen = pve.pve_pathlen;
        return (error);
}

static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

        bzero(pl32, sizeof(*pl32));
        pl32->pl_lwpid = pl->pl_lwpid;
        pl32->pl_event = pl->pl_event;
        pl32->pl_flags = pl->pl_flags;
        pl32->pl_sigmask = pl->pl_sigmask;
        pl32->pl_siglist = pl->pl_siglist;
        siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
        strcpy(pl32->pl_tdname, pl->pl_tdname);
        pl32->pl_child_pid = pl->pl_child_pid;
        pl32->pl_syscall_code = pl->pl_syscall_code;
        pl32->pl_syscall_narg = pl->pl_syscall_narg;
}

static void
ptrace_sc_ret_to32(const struct ptrace_sc_ret *psr,
    struct ptrace_sc_ret32 *psr32)
{

        bzero(psr32, sizeof(*psr32));
        psr32->sr_retval[0] = psr->sr_retval[0];
        psr32->sr_retval[1] = psr->sr_retval[1];
        psr32->sr_error = psr->sr_error;
}
#endif /* COMPAT_FREEBSD32 */
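
/*
 * Rough userland sketch (comment only; "pid" and "status" are hypothetical
 * locals): the typical life cycle of the ptrace(2) requests handled below is
 *
 *      ptrace(PT_ATTACH, pid, NULL, 0);
 *      waitpid(pid, &status, 0);                  - tracee stops with SIGSTOP
 *      ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);   - resume, deliver no signal
 *      ...
 *      ptrace(PT_DETACH, pid, NULL, 0);           - reparent back, stop tracing
 */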

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
        int     req;
        pid_t   pid;
        caddr_t addr;
        int     data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *    COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *    copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *    copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define BZERO(a, s)             wrap32 ? \
        bzero(a ## 32, s ## 32) : \
        bzero(a, s)
#define COPYIN(u, k, s)         wrap32 ? \
        copyin(u, k ## 32, s ## 32) : \
        copyin(u, k, s)
#define COPYOUT(k, u, s)        wrap32 ? \
        copyout(k ## 32, u, s ## 32) : \
        copyout(k, u, s)
#else
#define BZERO(a, s)             bzero(a, s)
#define COPYIN(u, k, s)         copyin(u, k, s)
#define COPYOUT(k, u, s)        copyout(k, u, s)
#endif
int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
        /*
         * XXX this obfuscation is to reduce stack usage, but the register
         * structs may be too large to put on the stack anyway.
         */
        union {
                struct ptrace_io_desc piod;
                struct ptrace_lwpinfo pl;
                struct ptrace_vm_entry pve;
                struct dbreg dbreg;
                struct fpreg fpreg;
                struct reg reg;
#ifdef COMPAT_FREEBSD32
                struct dbreg32 dbreg32;
                struct fpreg32 fpreg32;
                struct reg32 reg32;
                struct ptrace_io_desc32 piod32;
                struct ptrace_lwpinfo32 pl32;
                struct ptrace_vm_entry32 pve32;
#endif
                char args[sizeof(td->td_sa.args)];
                struct ptrace_sc_ret psr;
                int ptevents;
        } r;
        void *addr;
        int error = 0;
#ifdef COMPAT_FREEBSD32
        int wrap32 = 0;

        if (SV_CURPROC_FLAG(SV_ILP32))
                wrap32 = 1;
#endif
        AUDIT_ARG_PID(uap->pid);
        AUDIT_ARG_CMD(uap->req);
        AUDIT_ARG_VALUE(uap->data);
        addr = &r;
        switch (uap->req) {
        case PT_GET_EVENT_MASK:
        case PT_LWPINFO:
        case PT_GET_SC_ARGS:
        case PT_GET_SC_RET:
                break;
        case PT_GETREGS:
                BZERO(&r.reg, sizeof r.reg);
                break;
        case PT_GETFPREGS:
                BZERO(&r.fpreg, sizeof r.fpreg);
                break;
        case PT_GETDBREGS:
                BZERO(&r.dbreg, sizeof r.dbreg);
                break;
        case PT_SETREGS:
                error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
                break;
        case PT_SETFPREGS:
                error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
                break;
        case PT_SETDBREGS:
                error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
                break;
        case PT_SET_EVENT_MASK:
                if (uap->data != sizeof(r.ptevents))
                        error = EINVAL;
                else
                        error = copyin(uap->addr, &r.ptevents, uap->data);
                break;
        case PT_IO:
                error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
                break;
        case PT_VM_ENTRY:
                error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
                break;
        default:
                addr = uap->addr;
                break;
        }
        if (error)
                return (error);

        error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
        if (error)
                return (error);

        switch (uap->req) {
        case PT_VM_ENTRY:
                error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
                break;
        case PT_IO:
                error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
                break;
        case PT_GETREGS:
                error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
                break;
        case PT_GETFPREGS:
                error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
                break;
        case PT_GETDBREGS:
                error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
                break;
        case PT_GET_EVENT_MASK:
                /* NB: The size in uap->data is validated in kern_ptrace(). */
                error = copyout(&r.ptevents, uap->addr, uap->data);
                break;
        case PT_LWPINFO:
                /* NB: The size in uap->data is validated in kern_ptrace(). */
                error = copyout(&r.pl, uap->addr, uap->data);
                break;
        case PT_GET_SC_ARGS:
                error = copyout(r.args, uap->addr, MIN(uap->data,
                    sizeof(r.args)));
                break;
        case PT_GET_SC_RET:
                error = copyout(&r.psr, uap->addr, MIN(uap->data,
                    sizeof(r.psr)));
                break;
        }

        return (error);
}
#undef COPYIN
#undef COPYOUT
#undef BZERO

#ifdef COMPAT_FREEBSD32
/*
 *    PROC_READ(regs, td2, addr);
 * becomes either:
 *    proc_read_regs(td2, addr);
 * or
 *    proc_read_regs32(td2, addr);
 * .. except this is done at runtime. There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define PROC_READ(w, t, a)      wrap32 ? \
        proc_read_ ## w ## 32(t, a) : \
        proc_read_ ## w (t, a)
#define PROC_WRITE(w, t, a)     wrap32 ? \
        (safe ? proc_write_ ## w ## 32(t, a) : EINVAL) : \
        proc_write_ ## w (t, a)
#else
#define PROC_READ(w, t, a)      proc_read_ ## w (t, a)
#define PROC_WRITE(w, t, a)     proc_write_ ## w (t, a)
#endif
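
/*
 * Illustrative expansion (comment only): for a 32-bit tracer ("wrap32" set),
 * PROC_WRITE(regs, td2, addr) in kern_ptrace() evaluates as
 *
 *      wrap32 ? (safe ? proc_write_regs32(td2, addr) : EINVAL)
 *             : proc_write_regs(td2, addr)
 *
 * so a 32-bit debugger can never write register state into a 64-bit target.
 */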

void
proc_set_traced(struct proc *p, bool stop)
{

        sx_assert(&proctree_lock, SX_XLOCKED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        p->p_flag |= P_TRACED;
        if (stop)
                p->p_flag2 |= P2_PTRACE_FSTP;
        p->p_ptevents = PTRACE_DEFAULT;
}

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
        struct iovec iov;
        struct uio uio;
        struct proc *curp, *p, *pp;
        struct thread *td2 = NULL, *td3;
        struct ptrace_io_desc *piod = NULL;
        struct ptrace_lwpinfo *pl;
        struct ptrace_sc_ret *psr;
        int error, num, tmp;
        int proctree_locked = 0;
        lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
        int wrap32 = 0, safe = 0;
        struct ptrace_io_desc32 *piod32 = NULL;
        struct ptrace_lwpinfo32 *pl32 = NULL;
        struct ptrace_sc_ret32 *psr32 = NULL;
        union {
                struct ptrace_lwpinfo pl;
                struct ptrace_sc_ret psr;
        } r;
#endif

        curp = td->td_proc;

        /* Lock proctree before locking the process. */
        switch (req) {
        case PT_TRACE_ME:
        case PT_ATTACH:
        case PT_STEP:
        case PT_CONTINUE:
        case PT_TO_SCE:
        case PT_TO_SCX:
        case PT_SYSCALL:
        case PT_FOLLOW_FORK:
        case PT_LWP_EVENTS:
        case PT_GET_EVENT_MASK:
        case PT_SET_EVENT_MASK:
        case PT_DETACH:
        case PT_GET_SC_ARGS:
                sx_xlock(&proctree_lock);
                proctree_locked = 1;
                break;
        default:
                break;
        }

        if (req == PT_TRACE_ME) {
                p = td->td_proc;
                PROC_LOCK(p);
        } else {
                if (pid <= PID_MAX) {
                        if ((p = pfind(pid)) == NULL) {
                                if (proctree_locked)
                                        sx_xunlock(&proctree_lock);
                                return (ESRCH);
                        }
                } else {
                        td2 = tdfind(pid, -1);
                        if (td2 == NULL) {
                                if (proctree_locked)
                                        sx_xunlock(&proctree_lock);
                                return (ESRCH);
                        }
                        p = td2->td_proc;
                        tid = pid;
                        pid = p->p_pid;
                }
        }
        AUDIT_ARG_PROCESS(p);

        if ((p->p_flag & P_WEXIT) != 0) {
                error = ESRCH;
                goto fail;
        }
        if ((error = p_cansee(td, p)) != 0)
                goto fail;

        if ((error = p_candebug(td, p)) != 0)
                goto fail;

        /*
         * System processes can't be debugged.
         */
        if ((p->p_flag & P_SYSTEM) != 0) {
                error = EINVAL;
                goto fail;
        }

        if (tid == 0) {
                if ((p->p_flag & P_STOPPED_TRACE) != 0) {
                        KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
                        td2 = p->p_xthread;
                } else {
                        td2 = FIRST_THREAD_IN_PROC(p);
                }
                tid = td2->td_tid;
        }

#ifdef COMPAT_FREEBSD32
        /*
         * Test if we're a 32 bit client and what the target is.
         * Set the wrap controls accordingly.
         */
        if (SV_CURPROC_FLAG(SV_ILP32)) {
                if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
                        safe = 1;
                wrap32 = 1;
        }
#endif
        /*
         * Permissions check
         */
        switch (req) {
        case PT_TRACE_ME:
                /*
                 * Always legal, when there is a parent process which
                 * could trace us. Otherwise, reject.
                 */
                if ((p->p_flag & P_TRACED) != 0) {
                        error = EBUSY;
                        goto fail;
                }
                if (p->p_pptr == initproc) {
                        error = EPERM;
                        goto fail;
                }
                break;

        case PT_ATTACH:
                /* Self */
                if (p == td->td_proc) {
                        error = EINVAL;
                        goto fail;
                }

                /* Already traced */
                if (p->p_flag & P_TRACED) {
                        error = EBUSY;
                        goto fail;
                }

                /* Can't trace an ancestor if you're being traced. */
                if (curp->p_flag & P_TRACED) {
                        for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
                                if (pp == p) {
                                        error = EINVAL;
                                        goto fail;
                                }
                        }
                }

                /* OK */
                break;

        case PT_CLEARSTEP:
                /* Allow thread to clear single step for itself */
                if (td->td_tid == tid)
                        break;

                /* FALLTHROUGH */
        default:
                /* not being traced... */
                if ((p->p_flag & P_TRACED) == 0) {
                        error = EPERM;
                        goto fail;
                }

                /* not being traced by YOU */
                if (p->p_pptr != td->td_proc) {
                        error = EBUSY;
                        goto fail;
                }

                /* not currently stopped */
                if ((p->p_flag & P_STOPPED_TRACE) == 0 ||
                    p->p_suspcount != p->p_numthreads ||
                    (p->p_flag & P_WAITED) == 0) {
                        error = EBUSY;
                        goto fail;
                }

                /* OK */
                break;
        }

        /* Keep this process around until we finish this request. */
        _PHOLD(p);

#ifdef FIX_SSTEP
        /*
         * Single step fixup ala procfs
         */
        FIX_SSTEP(td2);
#endif

        /*
         * Actually do the requests
         */

        td->td_retval[0] = 0;

        switch (req) {
        case PT_TRACE_ME:
                /* set my trace flag and "owner" so it can read/write me */
                proc_set_traced(p, false);
                if (p->p_flag & P_PPWAIT)
                        p->p_flag |= P_PPTRACE;
                CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid);
                break;

        case PT_ATTACH:
                /* security check done above */
                /*
                 * It would be nice if the tracing relationship was separate
                 * from the parent relationship but that would require
                 * another set of links in the proc struct or for "wait"
                 * to scan the entire proc table. To make life easier,
                 * we just re-parent the process we're trying to trace.
                 * The old parent is remembered so we can put things back
                 * on a "detach".
                 */
                proc_set_traced(p, true);
                if (p->p_pptr != td->td_proc) {
                        proc_reparent(p, td->td_proc, false);
                }
                CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid,
                    p->p_oppid);

                sx_xunlock(&proctree_lock);
                proctree_locked = 0;
                MPASS(p->p_xthread == NULL);
                MPASS((p->p_flag & P_STOPPED_TRACE) == 0);

                /*
                 * If already stopped due to a stop signal, clear the
                 * existing stop before triggering a traced SIGSTOP.
                 */
                if ((p->p_flag & P_STOPPED_SIG) != 0) {
                        PROC_SLOCK(p);
                        p->p_flag &= ~(P_STOPPED_SIG | P_WAITED);
                        thread_unsuspend(p);
                        PROC_SUNLOCK(p);
                }

                kern_psignal(p, SIGSTOP);
                break;

        case PT_CLEARSTEP:
                CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                error = ptrace_clear_single_step(td2);
                break;

        case PT_SETSTEP:
                CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                error = ptrace_single_step(td2);
                break;

        case PT_SUSPEND:
                CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                td2->td_dbgflags |= TDB_SUSPEND;
                thread_lock(td2);
                td2->td_flags |= TDF_NEEDSUSPCHK;
                thread_unlock(td2);
                break;

        case PT_RESUME:
                CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                td2->td_dbgflags &= ~TDB_SUSPEND;
                break;

        case PT_FOLLOW_FORK:
                CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid,
                    p->p_ptevents & PTRACE_FORK ? "enabled" : "disabled",
                    data ? "enabled" : "disabled");
                if (data)
                        p->p_ptevents |= PTRACE_FORK;
                else
                        p->p_ptevents &= ~PTRACE_FORK;
                break;

        case PT_LWP_EVENTS:
                CTR3(KTR_PTRACE, "PT_LWP_EVENTS: pid %d %s -> %s", p->p_pid,
                    p->p_ptevents & PTRACE_LWP ? "enabled" : "disabled",
                    data ? "enabled" : "disabled");
                if (data)
                        p->p_ptevents |= PTRACE_LWP;
                else
                        p->p_ptevents &= ~PTRACE_LWP;
                break;

        case PT_GET_EVENT_MASK:
                if (data != sizeof(p->p_ptevents)) {
                        error = EINVAL;
                        break;
                }
                CTR2(KTR_PTRACE, "PT_GET_EVENT_MASK: pid %d mask %#x", p->p_pid,
                    p->p_ptevents);
                *(int *)addr = p->p_ptevents;
                break;

        case PT_SET_EVENT_MASK:
                if (data != sizeof(p->p_ptevents)) {
                        error = EINVAL;
                        break;
                }
                tmp = *(int *)addr;
                if ((tmp & ~(PTRACE_EXEC | PTRACE_SCE | PTRACE_SCX |
                    PTRACE_FORK | PTRACE_LWP | PTRACE_VFORK)) != 0) {
                        error = EINVAL;
                        break;
                }
                CTR3(KTR_PTRACE, "PT_SET_EVENT_MASK: pid %d mask %#x -> %#x",
                    p->p_pid, p->p_ptevents, tmp);
                p->p_ptevents = tmp;
                break;

        case PT_GET_SC_ARGS:
                CTR1(KTR_PTRACE, "PT_GET_SC_ARGS: pid %d", p->p_pid);
                if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) == 0
#ifdef COMPAT_FREEBSD32
                    || (wrap32 && !safe)
#endif
                    ) {
                        error = EINVAL;
                        break;
                }
                bzero(addr, sizeof(td2->td_sa.args));
#ifdef COMPAT_FREEBSD32
                if (wrap32)
                        for (num = 0; num < nitems(td2->td_sa.args); num++)
                                ((uint32_t *)addr)[num] = (uint32_t)
                                    td2->td_sa.args[num];
                else
#endif
                        bcopy(td2->td_sa.args, addr, td2->td_sa.narg *
                            sizeof(register_t));
                break;

        case PT_GET_SC_RET:
                if ((td2->td_dbgflags & (TDB_SCX)) == 0
#ifdef COMPAT_FREEBSD32
                    || (wrap32 && !safe)
#endif
                    ) {
                        error = EINVAL;
                        break;
                }
#ifdef COMPAT_FREEBSD32
                if (wrap32) {
                        psr = &r.psr;
                        psr32 = addr;
                } else
#endif
                        psr = addr;
                bzero(psr, sizeof(*psr));
                psr->sr_error = td2->td_errno;
                if (psr->sr_error == 0) {
                        psr->sr_retval[0] = td2->td_retval[0];
                        psr->sr_retval[1] = td2->td_retval[1];
                }
#ifdef COMPAT_FREEBSD32
                if (wrap32)
                        ptrace_sc_ret_to32(psr, psr32);
#endif
                CTR4(KTR_PTRACE,
                    "PT_GET_SC_RET: pid %d error %d retval %#lx,%#lx",
                    p->p_pid, psr->sr_error, psr->sr_retval[0],
                    psr->sr_retval[1]);
                break;
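
        /*
         * Userland sketch (comment only; assumes the tracee is stopped at a
         * syscall exit after PT_TO_SCX or PT_SYSCALL): a debugger fetches the
         * syscall result with
         *
         *      struct ptrace_sc_ret psr;
         *
         *      ptrace(PT_GET_SC_RET, pid, (caddr_t)&psr, sizeof(psr));
         *
         * where psr.sr_error == 0 means success and sr_retval[] holds the
         * return values.
         */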

        case PT_STEP:
        case PT_CONTINUE:
        case PT_TO_SCE:
        case PT_TO_SCX:
        case PT_SYSCALL:
        case PT_DETACH:
                /* Zero means do not send any signal */
                if (data < 0 || data > _SIG_MAXSIG) {
                        error = EINVAL;
                        break;
                }

                switch (req) {
                case PT_STEP:
                        CTR3(KTR_PTRACE, "PT_STEP: tid %d (pid %d), sig = %d",
                            td2->td_tid, p->p_pid, data);
                        error = ptrace_single_step(td2);
                        if (error)
                                goto out;
                        break;
                case PT_CONTINUE:
                case PT_TO_SCE:
                case PT_TO_SCX:
                case PT_SYSCALL:
                        if (addr != (void *)1) {
                                error = ptrace_set_pc(td2,
                                    (u_long)(uintfptr_t)addr);
                                if (error)
                                        goto out;
                        }
                        switch (req) {
                        case PT_TO_SCE:
                                p->p_ptevents |= PTRACE_SCE;
                                CTR4(KTR_PTRACE,
                                    "PT_TO_SCE: pid %d, events = %#x, PC = %#lx, sig = %d",
                                    p->p_pid, p->p_ptevents,
                                    (u_long)(uintfptr_t)addr, data);
                                break;
                        case PT_TO_SCX:
                                p->p_ptevents |= PTRACE_SCX;
                                CTR4(KTR_PTRACE,
                                    "PT_TO_SCX: pid %d, events = %#x, PC = %#lx, sig = %d",
                                    p->p_pid, p->p_ptevents,
                                    (u_long)(uintfptr_t)addr, data);
                                break;
                        case PT_SYSCALL:
                                p->p_ptevents |= PTRACE_SYSCALL;
                                CTR4(KTR_PTRACE,
                                    "PT_SYSCALL: pid %d, events = %#x, PC = %#lx, sig = %d",
                                    p->p_pid, p->p_ptevents,
                                    (u_long)(uintfptr_t)addr, data);
                                break;
                        case PT_CONTINUE:
                                CTR3(KTR_PTRACE,
                                    "PT_CONTINUE: pid %d, PC = %#lx, sig = %d",
                                    p->p_pid, (u_long)(uintfptr_t)addr, data);
                                break;
                        }
                        break;
                case PT_DETACH:
                        /*
                         * Reset the process parent.
                         *
                         * NB: This clears P_TRACED before reparenting
                         * a detached process back to its original
                         * parent. Otherwise the debuggee will be set
                         * as an orphan of the debugger.
                         */
                        p->p_flag &= ~(P_TRACED | P_WAITED);
                        if (p->p_oppid != p->p_pptr->p_pid) {
                                PROC_LOCK(p->p_pptr);
                                sigqueue_take(p->p_ksi);
                                PROC_UNLOCK(p->p_pptr);

                                pp = proc_realparent(p);
                                proc_reparent(p, pp, false);
                                if (pp == initproc)
                                        p->p_sigparent = SIGCHLD;
                                CTR3(KTR_PTRACE,
                                    "PT_DETACH: pid %d reparented to pid %d, sig %d",
                                    p->p_pid, pp->p_pid, data);
                        } else
                                CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d",
                                    p->p_pid, data);
                        p->p_ptevents = 0;
                        FOREACH_THREAD_IN_PROC(p, td3) {
                                if ((td3->td_dbgflags & TDB_FSTP) != 0) {
                                        sigqueue_delete(&td3->td_sigqueue,
                                            SIGSTOP);
                                }
                                td3->td_dbgflags &= ~(TDB_XSIG | TDB_FSTP |
                                    TDB_SUSPEND);
                        }

                        if ((p->p_flag2 & P2_PTRACE_FSTP) != 0) {
                                sigqueue_delete(&p->p_sigqueue, SIGSTOP);
                                p->p_flag2 &= ~P2_PTRACE_FSTP;
                        }

                        /* should we send SIGCHLD? */
                        /* childproc_continued(p); */
                        break;
                }

                sx_xunlock(&proctree_lock);
                proctree_locked = 0;

        sendsig:
                MPASS(proctree_locked == 0);

                /*
                 * Clear the pending event for the thread that just
                 * reported its event (p_xthread). This may not be
                 * the thread passed to PT_CONTINUE, PT_STEP, etc. if
                 * the debugger is resuming a different thread.
                 *
                 * Deliver any pending signal via the reporting thread.
                 */
                MPASS(p->p_xthread != NULL);
                p->p_xthread->td_dbgflags &= ~TDB_XSIG;
                p->p_xthread->td_xsig = data;
                p->p_xthread = NULL;
                p->p_xsig = data;

                /*
                 * P_WKILLED is insurance that a PT_KILL/SIGKILL
                 * always works immediately, even if another thread is
                 * unsuspended first and attempts to handle a
                 * different signal or if the POSIX.1b style signal
                 * queue cannot accommodate any new signals.
                 */
                if (data == SIGKILL)
                        proc_wkilled(p);

                /*
                 * Unsuspend all threads. To leave a thread
                 * suspended, use PT_SUSPEND to suspend it before
                 * continuing the process.
                 */
                PROC_SLOCK(p);
                p->p_flag &= ~(P_STOPPED_TRACE | P_STOPPED_SIG | P_WAITED);
                thread_unsuspend(p);
                PROC_SUNLOCK(p);
                break;

        case PT_WRITE_I:
        case PT_WRITE_D:
                td2->td_dbgflags |= TDB_USERWR;
                PROC_UNLOCK(p);
                error = 0;
                if (proc_writemem(td, p, (off_t)(uintptr_t)addr, &data,
                    sizeof(int)) != sizeof(int))
                        error = ENOMEM;
                else
                        CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x",
                            p->p_pid, addr, data);
                PROC_LOCK(p);
                break;

        case PT_READ_I:
        case PT_READ_D:
                PROC_UNLOCK(p);
                error = tmp = 0;
                if (proc_readmem(td, p, (off_t)(uintptr_t)addr, &tmp,
                    sizeof(int)) != sizeof(int))
                        error = ENOMEM;
                else
                        CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x",
                            p->p_pid, addr, tmp);
                td->td_retval[0] = tmp;
                PROC_LOCK(p);
                break;
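
        /*
         * Userland sketch (comment only; "pid", "target_va" and "local_buf"
         * are hypothetical): PT_IO moves arbitrarily sized buffers in one
         * request, unlike the single-word PT_READ and PT_WRITE requests above:
         *
         *      struct ptrace_io_desc piod;
         *
         *      piod.piod_op = PIOD_READ_D;
         *      piod.piod_offs = (void *)target_va;
         *      piod.piod_addr = local_buf;
         *      piod.piod_len = sizeof(local_buf);
         *      ptrace(PT_IO, pid, (caddr_t)&piod, 0);
         *
         * On return piod_len is reduced to the number of bytes transferred.
         */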

        case PT_IO:
#ifdef COMPAT_FREEBSD32
                if (wrap32) {
                        piod32 = addr;
                        iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
                        iov.iov_len = piod32->piod_len;
                        uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
                        uio.uio_resid = piod32->piod_len;
                } else
#endif
                {
                        piod = addr;
                        iov.iov_base = piod->piod_addr;
                        iov.iov_len = piod->piod_len;
                        uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
                        uio.uio_resid = piod->piod_len;
                }
                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_segflg = UIO_USERSPACE;
                uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
                tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
                tmp = piod->piod_op;
#endif
                switch (tmp) {
                case PIOD_READ_D:
                case PIOD_READ_I:
                        CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)",
                            p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
                        uio.uio_rw = UIO_READ;
                        break;
                case PIOD_WRITE_D:
                case PIOD_WRITE_I:
                        CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)",
                            p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
                        td2->td_dbgflags |= TDB_USERWR;
                        uio.uio_rw = UIO_WRITE;
                        break;
                default:
                        error = EINVAL;
                        goto out;
                }
                PROC_UNLOCK(p);
                error = proc_rwmem(p, &uio);
#ifdef COMPAT_FREEBSD32
                if (wrap32)
                        piod32->piod_len -= uio.uio_resid;
                else
#endif
                        piod->piod_len -= uio.uio_resid;
                PROC_LOCK(p);
                break;

        case PT_KILL:
                CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid);
                data = SIGKILL;
                goto sendsig;   /* in PT_CONTINUE above */

        case PT_SETREGS:
                CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                td2->td_dbgflags |= TDB_USERWR;
                error = PROC_WRITE(regs, td2, addr);
                break;

        case PT_GETREGS:
                CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                error = PROC_READ(regs, td2, addr);
                break;

        case PT_SETFPREGS:
                CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                td2->td_dbgflags |= TDB_USERWR;
                error = PROC_WRITE(fpregs, td2, addr);
                break;

        case PT_GETFPREGS:
                CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                error = PROC_READ(fpregs, td2, addr);
                break;

        case PT_SETDBREGS:
                CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                td2->td_dbgflags |= TDB_USERWR;
                error = PROC_WRITE(dbregs, td2, addr);
                break;

        case PT_GETDBREGS:
                CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid,
                    p->p_pid);
                error = PROC_READ(dbregs, td2, addr);
                break;

        case PT_LWPINFO:
                if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
                    (!wrap32 && data > sizeof(*pl)) ||
                    (wrap32 && data > sizeof(*pl32))) {
#else
                    data > sizeof(*pl)) {
#endif
                        error = EINVAL;
                        break;
                }
#ifdef COMPAT_FREEBSD32
                if (wrap32) {
                        pl = &r.pl;
                        pl32 = addr;
                } else
#endif
                        pl = addr;
                bzero(pl, sizeof(*pl));
                pl->pl_lwpid = td2->td_tid;
                pl->pl_event = PL_EVENT_NONE;
                pl->pl_flags = 0;
                if (td2->td_dbgflags & TDB_XSIG) {
                        pl->pl_event = PL_EVENT_SIGNAL;
                        if (td2->td_si.si_signo != 0 &&
#ifdef COMPAT_FREEBSD32
                            ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
                            pl_siginfo) + sizeof(pl->pl_siginfo)) ||
                            (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
                            pl_siginfo) + sizeof(struct siginfo32)))
#else
                            data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
                            + sizeof(pl->pl_siginfo)
#endif
                        ) {
                                pl->pl_flags |= PL_FLAG_SI;
                                pl->pl_siginfo = td2->td_si;
                        }
                }
                if (td2->td_dbgflags & TDB_SCE)
                        pl->pl_flags |= PL_FLAG_SCE;
                else if (td2->td_dbgflags & TDB_SCX)
                        pl->pl_flags |= PL_FLAG_SCX;
                if (td2->td_dbgflags & TDB_EXEC)
                        pl->pl_flags |= PL_FLAG_EXEC;
                if (td2->td_dbgflags & TDB_FORK) {
                        pl->pl_flags |= PL_FLAG_FORKED;
                        pl->pl_child_pid = td2->td_dbg_forked;
                        if (td2->td_dbgflags & TDB_VFORK)
                                pl->pl_flags |= PL_FLAG_VFORKED;
                } else if ((td2->td_dbgflags & (TDB_SCX | TDB_VFORK)) ==
                    TDB_VFORK)
                        pl->pl_flags |= PL_FLAG_VFORK_DONE;
                if (td2->td_dbgflags & TDB_CHILD)
                        pl->pl_flags |= PL_FLAG_CHILD;
                if (td2->td_dbgflags & TDB_BORN)
                        pl->pl_flags |= PL_FLAG_BORN;
                if (td2->td_dbgflags & TDB_EXIT)
                        pl->pl_flags |= PL_FLAG_EXITED;
                pl->pl_sigmask = td2->td_sigmask;
                pl->pl_siglist = td2->td_siglist;
                strcpy(pl->pl_tdname, td2->td_name);
                if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) {
                        pl->pl_syscall_code = td2->td_sa.code;
                        pl->pl_syscall_narg = td2->td_sa.narg;
                } else {
                        pl->pl_syscall_code = 0;
                        pl->pl_syscall_narg = 0;
                }
#ifdef COMPAT_FREEBSD32
                if (wrap32)
                        ptrace_lwpinfo_to32(pl, pl32);
#endif
                CTR6(KTR_PTRACE,
                    "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d",
                    td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags,
                    pl->pl_child_pid, pl->pl_syscall_code);
                break;

        case PT_GETNUMLWPS:
                CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid,
                    p->p_numthreads);
                td->td_retval[0] = p->p_numthreads;
                break;

        case PT_GETLWPLIST:
                CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d",
                    p->p_pid, data, p->p_numthreads);
                if (data <= 0) {
                        error = EINVAL;
                        break;
                }
                num = imin(p->p_numthreads, data);
                PROC_UNLOCK(p);
                buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
                tmp = 0;
                PROC_LOCK(p);
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (tmp >= num)
                                break;
                        buf[tmp++] = td2->td_tid;
                }
                PROC_UNLOCK(p);
                error = copyout(buf, addr, tmp * sizeof(lwpid_t));
                free(buf, M_TEMP);
                if (!error)
                        td->td_retval[0] = tmp;
                PROC_LOCK(p);
                break;

        case PT_VM_TIMESTAMP:
                CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d",
                    p->p_pid, p->p_vmspace->vm_map.timestamp);
                td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
                break;

        case PT_VM_ENTRY:
                PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
                if (wrap32)
                        error = ptrace_vm_entry32(td, p, addr);
                else
#endif
                        error = ptrace_vm_entry(td, p, addr);
                PROC_LOCK(p);
                break;

        default:
#ifdef __HAVE_PTRACE_MACHDEP
                if (req >= PT_FIRSTMACH) {
                        PROC_UNLOCK(p);
                        error = cpu_ptrace(td2, req, addr, data);
                        PROC_LOCK(p);
                } else
#endif
                        /* Unknown request. */
                        error = EINVAL;
                break;
        }

out:
        /* Drop our hold on this process now that the request has completed. */
        _PRELE(p);
fail:
        PROC_UNLOCK(p);
        if (proctree_locked)
                sx_xunlock(&proctree_lock);
        return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

        PROC_LOCK_ASSERT(p, MA_OWNED);
        p->p_step = 1;
        CTR3(KTR_PTRACE, "stopevent: pid %d event %u val %u", p->p_pid, event,
            val);
        do {
                if (event != S_EXIT)
                        p->p_xsig = val;
                p->p_xthread = NULL;
                p->p_stype = event;     /* Which event caused the stop? */
                wakeup(&p->p_stype);    /* Wake up any PIOCWAIT'ing procs */
                msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
        } while (p->p_step);
}