1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 1994, Sean Eric Fagan 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Sean Eric Fagan. 18 * 4. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/ktr.h> 40 #include <sys/limits.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/syscallsubr.h> 44 #include <sys/sysent.h> 45 #include <sys/sysproto.h> 46 #include <sys/pioctl.h> 47 #include <sys/priv.h> 48 #include <sys/proc.h> 49 #include <sys/vnode.h> 50 #include <sys/ptrace.h> 51 #include <sys/rwlock.h> 52 #include <sys/sx.h> 53 #include <sys/malloc.h> 54 #include <sys/signalvar.h> 55 56 #include <machine/reg.h> 57 58 #include <security/audit/audit.h> 59 60 #include <vm/vm.h> 61 #include <vm/pmap.h> 62 #include <vm/vm_extern.h> 63 #include <vm/vm_map.h> 64 #include <vm/vm_kern.h> 65 #include <vm/vm_object.h> 66 #include <vm/vm_page.h> 67 #include <vm/vm_param.h> 68 69 #ifdef COMPAT_FREEBSD32 70 #include <sys/procfs.h> 71 #include <compat/freebsd32/freebsd32_signal.h> 72 73 struct ptrace_io_desc32 { 74 int piod_op; 75 uint32_t piod_offs; 76 uint32_t piod_addr; 77 uint32_t piod_len; 78 }; 79 80 struct ptrace_sc_ret32 { 81 uint32_t sr_retval[2]; 82 int sr_error; 83 }; 84 85 struct ptrace_vm_entry32 { 86 int pve_entry; 87 int pve_timestamp; 88 uint32_t pve_start; 89 uint32_t pve_end; 90 uint32_t pve_offset; 91 u_int pve_prot; 92 u_int pve_pathlen; 93 int32_t pve_fileid; 94 u_int pve_fsid; 95 uint32_t pve_path; 96 }; 97 #endif 98 99 /* 100 * Functions implemented using PROC_ACTION(): 101 * 102 * proc_read_regs(proc, regs) 103 * Get the current user-visible register set from the process 104 * and copy it into the regs structure (<machine/reg.h>). 105 * The process is stopped at the time read_regs is called. 106 * 107 * proc_write_regs(proc, regs) 108 * Update the current register set from the passed in regs 109 * structure. Take care to avoid clobbering special CPU 110 * registers or privileged bits in the PSL. 111 * Depending on the architecture this may have fix-up work to do, 112 * especially if the IAR or PCW are modified. 113 * The process is stopped at the time write_regs is called. 114 * 115 * proc_read_fpregs, proc_write_fpregs 116 * deal with the floating point register set, otherwise as above. 117 * 118 * proc_read_dbregs, proc_write_dbregs 119 * deal with the processor debug register set, otherwise as above. 120 * 121 * proc_sstep(proc) 122 * Arrange for the process to trap after executing a single instruction. 123 */ 124 125 #define PROC_ACTION(action) do { \ 126 int error; \ 127 \ 128 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \ 129 if ((td->td_proc->p_flag & P_INMEM) == 0) \ 130 error = EIO; \ 131 else \ 132 error = (action); \ 133 return (error); \ 134 } while(0) 135 136 int 137 proc_read_regs(struct thread *td, struct reg *regs) 138 { 139 140 PROC_ACTION(fill_regs(td, regs)); 141 } 142 143 int 144 proc_write_regs(struct thread *td, struct reg *regs) 145 { 146 147 PROC_ACTION(set_regs(td, regs)); 148 } 149 150 int 151 proc_read_dbregs(struct thread *td, struct dbreg *dbregs) 152 { 153 154 PROC_ACTION(fill_dbregs(td, dbregs)); 155 } 156 157 int 158 proc_write_dbregs(struct thread *td, struct dbreg *dbregs) 159 { 160 161 PROC_ACTION(set_dbregs(td, dbregs)); 162 } 163 164 /* 165 * Ptrace doesn't support fpregs at all, and there are no security holes 166 * or translations for fpregs, so we can just copy them. 167 */ 168 int 169 proc_read_fpregs(struct thread *td, struct fpreg *fpregs) 170 { 171 172 PROC_ACTION(fill_fpregs(td, fpregs)); 173 } 174 175 int 176 proc_write_fpregs(struct thread *td, struct fpreg *fpregs) 177 { 178 179 PROC_ACTION(set_fpregs(td, fpregs)); 180 } 181 182 #ifdef COMPAT_FREEBSD32 183 /* For 32 bit binaries, we need to expose the 32 bit regs layouts. */ 184 int 185 proc_read_regs32(struct thread *td, struct reg32 *regs32) 186 { 187 188 PROC_ACTION(fill_regs32(td, regs32)); 189 } 190 191 int 192 proc_write_regs32(struct thread *td, struct reg32 *regs32) 193 { 194 195 PROC_ACTION(set_regs32(td, regs32)); 196 } 197 198 int 199 proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 200 { 201 202 PROC_ACTION(fill_dbregs32(td, dbregs32)); 203 } 204 205 int 206 proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 207 { 208 209 PROC_ACTION(set_dbregs32(td, dbregs32)); 210 } 211 212 int 213 proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 214 { 215 216 PROC_ACTION(fill_fpregs32(td, fpregs32)); 217 } 218 219 int 220 proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 221 { 222 223 PROC_ACTION(set_fpregs32(td, fpregs32)); 224 } 225 #endif 226 227 int 228 proc_sstep(struct thread *td) 229 { 230 231 PROC_ACTION(ptrace_single_step(td)); 232 } 233 234 int 235 proc_rwmem(struct proc *p, struct uio *uio) 236 { 237 vm_map_t map; 238 vm_offset_t pageno; /* page number */ 239 vm_prot_t reqprot; 240 int error, fault_flags, page_offset, writing; 241 242 /* 243 * Assert that someone has locked this vmspace. (Should be 244 * curthread but we can't assert that.) This keeps the process 245 * from exiting out from under us until this operation completes. 246 */ 247 PROC_ASSERT_HELD(p); 248 PROC_LOCK_ASSERT(p, MA_NOTOWNED); 249 250 /* 251 * The map we want... 252 */ 253 map = &p->p_vmspace->vm_map; 254 255 /* 256 * If we are writing, then we request vm_fault() to create a private 257 * copy of each page. Since these copies will not be writeable by the 258 * process, we must explicity request that they be dirtied. 259 */ 260 writing = uio->uio_rw == UIO_WRITE; 261 reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ; 262 fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL; 263 264 /* 265 * Only map in one page at a time. We don't have to, but it 266 * makes things easier. This way is trivial - right? 267 */ 268 do { 269 vm_offset_t uva; 270 u_int len; 271 vm_page_t m; 272 273 uva = (vm_offset_t)uio->uio_offset; 274 275 /* 276 * Get the page number of this segment. 277 */ 278 pageno = trunc_page(uva); 279 page_offset = uva - pageno; 280 281 /* 282 * How many bytes to copy 283 */ 284 len = min(PAGE_SIZE - page_offset, uio->uio_resid); 285 286 /* 287 * Fault and hold the page on behalf of the process. 288 */ 289 error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m); 290 if (error != KERN_SUCCESS) { 291 if (error == KERN_RESOURCE_SHORTAGE) 292 error = ENOMEM; 293 else 294 error = EFAULT; 295 break; 296 } 297 298 /* 299 * Now do the i/o move. 300 */ 301 error = uiomove_fromphys(&m, page_offset, len, uio); 302 303 /* Make the I-cache coherent for breakpoints. */ 304 if (writing && error == 0) { 305 vm_map_lock_read(map); 306 if (vm_map_check_protection(map, pageno, pageno + 307 PAGE_SIZE, VM_PROT_EXECUTE)) 308 vm_sync_icache(map, uva, len); 309 vm_map_unlock_read(map); 310 } 311 312 /* 313 * Release the page. 314 */ 315 vm_page_lock(m); 316 if (vm_page_unwire(m, PQ_ACTIVE) && m->object == NULL) 317 vm_page_free(m); 318 vm_page_unlock(m); 319 320 } while (error == 0 && uio->uio_resid > 0); 321 322 return (error); 323 } 324 325 static ssize_t 326 proc_iop(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 327 size_t len, enum uio_rw rw) 328 { 329 struct iovec iov; 330 struct uio uio; 331 ssize_t slen; 332 333 MPASS(len < SSIZE_MAX); 334 slen = (ssize_t)len; 335 336 iov.iov_base = (caddr_t)buf; 337 iov.iov_len = len; 338 uio.uio_iov = &iov; 339 uio.uio_iovcnt = 1; 340 uio.uio_offset = va; 341 uio.uio_resid = slen; 342 uio.uio_segflg = UIO_SYSSPACE; 343 uio.uio_rw = rw; 344 uio.uio_td = td; 345 proc_rwmem(p, &uio); 346 if (uio.uio_resid == slen) 347 return (-1); 348 return (slen - uio.uio_resid); 349 } 350 351 ssize_t 352 proc_readmem(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 353 size_t len) 354 { 355 356 return (proc_iop(td, p, va, buf, len, UIO_READ)); 357 } 358 359 ssize_t 360 proc_writemem(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 361 size_t len) 362 { 363 364 return (proc_iop(td, p, va, buf, len, UIO_WRITE)); 365 } 366 367 static int 368 ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve) 369 { 370 struct vattr vattr; 371 vm_map_t map; 372 vm_map_entry_t entry; 373 vm_object_t obj, tobj, lobj; 374 struct vmspace *vm; 375 struct vnode *vp; 376 char *freepath, *fullpath; 377 u_int pathlen; 378 int error, index; 379 380 error = 0; 381 obj = NULL; 382 383 vm = vmspace_acquire_ref(p); 384 map = &vm->vm_map; 385 vm_map_lock_read(map); 386 387 do { 388 entry = map->header.next; 389 index = 0; 390 while (index < pve->pve_entry && entry != &map->header) { 391 entry = entry->next; 392 index++; 393 } 394 if (index != pve->pve_entry) { 395 error = EINVAL; 396 break; 397 } 398 KASSERT((map->header.eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 399 ("Submap in map header")); 400 while ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { 401 entry = entry->next; 402 index++; 403 } 404 if (entry == &map->header) { 405 error = ENOENT; 406 break; 407 } 408 409 /* We got an entry. */ 410 pve->pve_entry = index + 1; 411 pve->pve_timestamp = map->timestamp; 412 pve->pve_start = entry->start; 413 pve->pve_end = entry->end - 1; 414 pve->pve_offset = entry->offset; 415 pve->pve_prot = entry->protection; 416 417 /* Backing object's path needed? */ 418 if (pve->pve_pathlen == 0) 419 break; 420 421 pathlen = pve->pve_pathlen; 422 pve->pve_pathlen = 0; 423 424 obj = entry->object.vm_object; 425 if (obj != NULL) 426 VM_OBJECT_RLOCK(obj); 427 } while (0); 428 429 vm_map_unlock_read(map); 430 431 pve->pve_fsid = VNOVAL; 432 pve->pve_fileid = VNOVAL; 433 434 if (error == 0 && obj != NULL) { 435 lobj = obj; 436 for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) { 437 if (tobj != obj) 438 VM_OBJECT_RLOCK(tobj); 439 if (lobj != obj) 440 VM_OBJECT_RUNLOCK(lobj); 441 lobj = tobj; 442 pve->pve_offset += tobj->backing_object_offset; 443 } 444 vp = vm_object_vnode(lobj); 445 if (vp != NULL) 446 vref(vp); 447 if (lobj != obj) 448 VM_OBJECT_RUNLOCK(lobj); 449 VM_OBJECT_RUNLOCK(obj); 450 451 if (vp != NULL) { 452 freepath = NULL; 453 fullpath = NULL; 454 vn_fullpath(td, vp, &fullpath, &freepath); 455 vn_lock(vp, LK_SHARED | LK_RETRY); 456 if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) { 457 pve->pve_fileid = vattr.va_fileid; 458 pve->pve_fsid = vattr.va_fsid; 459 } 460 vput(vp); 461 462 if (fullpath != NULL) { 463 pve->pve_pathlen = strlen(fullpath) + 1; 464 if (pve->pve_pathlen <= pathlen) { 465 error = copyout(fullpath, pve->pve_path, 466 pve->pve_pathlen); 467 } else 468 error = ENAMETOOLONG; 469 } 470 if (freepath != NULL) 471 free(freepath, M_TEMP); 472 } 473 } 474 vmspace_free(vm); 475 if (error == 0) 476 CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p", 477 p->p_pid, pve->pve_entry, pve->pve_start); 478 479 return (error); 480 } 481 482 #ifdef COMPAT_FREEBSD32 483 static int 484 ptrace_vm_entry32(struct thread *td, struct proc *p, 485 struct ptrace_vm_entry32 *pve32) 486 { 487 struct ptrace_vm_entry pve; 488 int error; 489 490 pve.pve_entry = pve32->pve_entry; 491 pve.pve_pathlen = pve32->pve_pathlen; 492 pve.pve_path = (void *)(uintptr_t)pve32->pve_path; 493 494 error = ptrace_vm_entry(td, p, &pve); 495 if (error == 0) { 496 pve32->pve_entry = pve.pve_entry; 497 pve32->pve_timestamp = pve.pve_timestamp; 498 pve32->pve_start = pve.pve_start; 499 pve32->pve_end = pve.pve_end; 500 pve32->pve_offset = pve.pve_offset; 501 pve32->pve_prot = pve.pve_prot; 502 pve32->pve_fileid = pve.pve_fileid; 503 pve32->pve_fsid = pve.pve_fsid; 504 } 505 506 pve32->pve_pathlen = pve.pve_pathlen; 507 return (error); 508 } 509 510 static void 511 ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl, 512 struct ptrace_lwpinfo32 *pl32) 513 { 514 515 bzero(pl32, sizeof(*pl32)); 516 pl32->pl_lwpid = pl->pl_lwpid; 517 pl32->pl_event = pl->pl_event; 518 pl32->pl_flags = pl->pl_flags; 519 pl32->pl_sigmask = pl->pl_sigmask; 520 pl32->pl_siglist = pl->pl_siglist; 521 siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo); 522 strcpy(pl32->pl_tdname, pl->pl_tdname); 523 pl32->pl_child_pid = pl->pl_child_pid; 524 pl32->pl_syscall_code = pl->pl_syscall_code; 525 pl32->pl_syscall_narg = pl->pl_syscall_narg; 526 } 527 528 static void 529 ptrace_sc_ret_to32(const struct ptrace_sc_ret *psr, 530 struct ptrace_sc_ret32 *psr32) 531 { 532 533 bzero(psr32, sizeof(*psr32)); 534 psr32->sr_retval[0] = psr->sr_retval[0]; 535 psr32->sr_retval[1] = psr->sr_retval[1]; 536 psr32->sr_error = psr->sr_error; 537 } 538 #endif /* COMPAT_FREEBSD32 */ 539 540 /* 541 * Process debugging system call. 542 */ 543 #ifndef _SYS_SYSPROTO_H_ 544 struct ptrace_args { 545 int req; 546 pid_t pid; 547 caddr_t addr; 548 int data; 549 }; 550 #endif 551 552 #ifdef COMPAT_FREEBSD32 553 /* 554 * This CPP subterfuge is to try and reduce the number of ifdefs in 555 * the body of the code. 556 * COPYIN(uap->addr, &r.reg, sizeof r.reg); 557 * becomes either: 558 * copyin(uap->addr, &r.reg, sizeof r.reg); 559 * or 560 * copyin(uap->addr, &r.reg32, sizeof r.reg32); 561 * .. except this is done at runtime. 562 */ 563 #define BZERO(a, s) wrap32 ? \ 564 bzero(a ## 32, s ## 32) : \ 565 bzero(a, s) 566 #define COPYIN(u, k, s) wrap32 ? \ 567 copyin(u, k ## 32, s ## 32) : \ 568 copyin(u, k, s) 569 #define COPYOUT(k, u, s) wrap32 ? \ 570 copyout(k ## 32, u, s ## 32) : \ 571 copyout(k, u, s) 572 #else 573 #define BZERO(a, s) bzero(a, s) 574 #define COPYIN(u, k, s) copyin(u, k, s) 575 #define COPYOUT(k, u, s) copyout(k, u, s) 576 #endif 577 int 578 sys_ptrace(struct thread *td, struct ptrace_args *uap) 579 { 580 /* 581 * XXX this obfuscation is to reduce stack usage, but the register 582 * structs may be too large to put on the stack anyway. 583 */ 584 union { 585 struct ptrace_io_desc piod; 586 struct ptrace_lwpinfo pl; 587 struct ptrace_vm_entry pve; 588 struct dbreg dbreg; 589 struct fpreg fpreg; 590 struct reg reg; 591 #ifdef COMPAT_FREEBSD32 592 struct dbreg32 dbreg32; 593 struct fpreg32 fpreg32; 594 struct reg32 reg32; 595 struct ptrace_io_desc32 piod32; 596 struct ptrace_lwpinfo32 pl32; 597 struct ptrace_vm_entry32 pve32; 598 #endif 599 char args[sizeof(td->td_sa.args)]; 600 struct ptrace_sc_ret psr; 601 int ptevents; 602 } r; 603 void *addr; 604 int error = 0; 605 #ifdef COMPAT_FREEBSD32 606 int wrap32 = 0; 607 608 if (SV_CURPROC_FLAG(SV_ILP32)) 609 wrap32 = 1; 610 #endif 611 AUDIT_ARG_PID(uap->pid); 612 AUDIT_ARG_CMD(uap->req); 613 AUDIT_ARG_VALUE(uap->data); 614 addr = &r; 615 switch (uap->req) { 616 case PT_GET_EVENT_MASK: 617 case PT_LWPINFO: 618 case PT_GET_SC_ARGS: 619 case PT_GET_SC_RET: 620 break; 621 case PT_GETREGS: 622 BZERO(&r.reg, sizeof r.reg); 623 break; 624 case PT_GETFPREGS: 625 BZERO(&r.fpreg, sizeof r.fpreg); 626 break; 627 case PT_GETDBREGS: 628 BZERO(&r.dbreg, sizeof r.dbreg); 629 break; 630 case PT_SETREGS: 631 error = COPYIN(uap->addr, &r.reg, sizeof r.reg); 632 break; 633 case PT_SETFPREGS: 634 error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg); 635 break; 636 case PT_SETDBREGS: 637 error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg); 638 break; 639 case PT_SET_EVENT_MASK: 640 if (uap->data != sizeof(r.ptevents)) 641 error = EINVAL; 642 else 643 error = copyin(uap->addr, &r.ptevents, uap->data); 644 break; 645 case PT_IO: 646 error = COPYIN(uap->addr, &r.piod, sizeof r.piod); 647 break; 648 case PT_VM_ENTRY: 649 error = COPYIN(uap->addr, &r.pve, sizeof r.pve); 650 break; 651 default: 652 addr = uap->addr; 653 break; 654 } 655 if (error) 656 return (error); 657 658 error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data); 659 if (error) 660 return (error); 661 662 switch (uap->req) { 663 case PT_VM_ENTRY: 664 error = COPYOUT(&r.pve, uap->addr, sizeof r.pve); 665 break; 666 case PT_IO: 667 error = COPYOUT(&r.piod, uap->addr, sizeof r.piod); 668 break; 669 case PT_GETREGS: 670 error = COPYOUT(&r.reg, uap->addr, sizeof r.reg); 671 break; 672 case PT_GETFPREGS: 673 error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg); 674 break; 675 case PT_GETDBREGS: 676 error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg); 677 break; 678 case PT_GET_EVENT_MASK: 679 /* NB: The size in uap->data is validated in kern_ptrace(). */ 680 error = copyout(&r.ptevents, uap->addr, uap->data); 681 break; 682 case PT_LWPINFO: 683 /* NB: The size in uap->data is validated in kern_ptrace(). */ 684 error = copyout(&r.pl, uap->addr, uap->data); 685 break; 686 case PT_GET_SC_ARGS: 687 error = copyout(r.args, uap->addr, MIN(uap->data, 688 sizeof(r.args))); 689 break; 690 case PT_GET_SC_RET: 691 error = copyout(&r.psr, uap->addr, MIN(uap->data, 692 sizeof(r.psr))); 693 break; 694 } 695 696 return (error); 697 } 698 #undef COPYIN 699 #undef COPYOUT 700 #undef BZERO 701 702 #ifdef COMPAT_FREEBSD32 703 /* 704 * PROC_READ(regs, td2, addr); 705 * becomes either: 706 * proc_read_regs(td2, addr); 707 * or 708 * proc_read_regs32(td2, addr); 709 * .. except this is done at runtime. There is an additional 710 * complication in that PROC_WRITE disallows 32 bit consumers 711 * from writing to 64 bit address space targets. 712 */ 713 #define PROC_READ(w, t, a) wrap32 ? \ 714 proc_read_ ## w ## 32(t, a) : \ 715 proc_read_ ## w (t, a) 716 #define PROC_WRITE(w, t, a) wrap32 ? \ 717 (safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \ 718 proc_write_ ## w (t, a) 719 #else 720 #define PROC_READ(w, t, a) proc_read_ ## w (t, a) 721 #define PROC_WRITE(w, t, a) proc_write_ ## w (t, a) 722 #endif 723 724 void 725 proc_set_traced(struct proc *p, bool stop) 726 { 727 728 sx_assert(&proctree_lock, SX_XLOCKED); 729 PROC_LOCK_ASSERT(p, MA_OWNED); 730 p->p_flag |= P_TRACED; 731 if (stop) 732 p->p_flag2 |= P2_PTRACE_FSTP; 733 p->p_ptevents = PTRACE_DEFAULT; 734 } 735 736 int 737 kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data) 738 { 739 struct iovec iov; 740 struct uio uio; 741 struct proc *curp, *p, *pp; 742 struct thread *td2 = NULL, *td3; 743 struct ptrace_io_desc *piod = NULL; 744 struct ptrace_lwpinfo *pl; 745 struct ptrace_sc_ret *psr; 746 int error, num, tmp; 747 int proctree_locked = 0; 748 lwpid_t tid = 0, *buf; 749 #ifdef COMPAT_FREEBSD32 750 int wrap32 = 0, safe = 0; 751 struct ptrace_io_desc32 *piod32 = NULL; 752 struct ptrace_lwpinfo32 *pl32 = NULL; 753 struct ptrace_sc_ret32 *psr32 = NULL; 754 union { 755 struct ptrace_lwpinfo pl; 756 struct ptrace_sc_ret psr; 757 } r; 758 #endif 759 760 curp = td->td_proc; 761 762 /* Lock proctree before locking the process. */ 763 switch (req) { 764 case PT_TRACE_ME: 765 case PT_ATTACH: 766 case PT_STEP: 767 case PT_CONTINUE: 768 case PT_TO_SCE: 769 case PT_TO_SCX: 770 case PT_SYSCALL: 771 case PT_FOLLOW_FORK: 772 case PT_LWP_EVENTS: 773 case PT_GET_EVENT_MASK: 774 case PT_SET_EVENT_MASK: 775 case PT_DETACH: 776 case PT_GET_SC_ARGS: 777 sx_xlock(&proctree_lock); 778 proctree_locked = 1; 779 break; 780 default: 781 break; 782 } 783 784 if (req == PT_TRACE_ME) { 785 p = td->td_proc; 786 PROC_LOCK(p); 787 } else { 788 if (pid <= PID_MAX) { 789 if ((p = pfind(pid)) == NULL) { 790 if (proctree_locked) 791 sx_xunlock(&proctree_lock); 792 return (ESRCH); 793 } 794 } else { 795 td2 = tdfind(pid, -1); 796 if (td2 == NULL) { 797 if (proctree_locked) 798 sx_xunlock(&proctree_lock); 799 return (ESRCH); 800 } 801 p = td2->td_proc; 802 tid = pid; 803 pid = p->p_pid; 804 } 805 } 806 AUDIT_ARG_PROCESS(p); 807 808 if ((p->p_flag & P_WEXIT) != 0) { 809 error = ESRCH; 810 goto fail; 811 } 812 if ((error = p_cansee(td, p)) != 0) 813 goto fail; 814 815 if ((error = p_candebug(td, p)) != 0) 816 goto fail; 817 818 /* 819 * System processes can't be debugged. 820 */ 821 if ((p->p_flag & P_SYSTEM) != 0) { 822 error = EINVAL; 823 goto fail; 824 } 825 826 if (tid == 0) { 827 if ((p->p_flag & P_STOPPED_TRACE) != 0) { 828 KASSERT(p->p_xthread != NULL, ("NULL p_xthread")); 829 td2 = p->p_xthread; 830 } else { 831 td2 = FIRST_THREAD_IN_PROC(p); 832 } 833 tid = td2->td_tid; 834 } 835 836 #ifdef COMPAT_FREEBSD32 837 /* 838 * Test if we're a 32 bit client and what the target is. 839 * Set the wrap controls accordingly. 840 */ 841 if (SV_CURPROC_FLAG(SV_ILP32)) { 842 if (SV_PROC_FLAG(td2->td_proc, SV_ILP32)) 843 safe = 1; 844 wrap32 = 1; 845 } 846 #endif 847 /* 848 * Permissions check 849 */ 850 switch (req) { 851 case PT_TRACE_ME: 852 /* 853 * Always legal, when there is a parent process which 854 * could trace us. Otherwise, reject. 855 */ 856 if ((p->p_flag & P_TRACED) != 0) { 857 error = EBUSY; 858 goto fail; 859 } 860 if (p->p_pptr == initproc) { 861 error = EPERM; 862 goto fail; 863 } 864 break; 865 866 case PT_ATTACH: 867 /* Self */ 868 if (p == td->td_proc) { 869 error = EINVAL; 870 goto fail; 871 } 872 873 /* Already traced */ 874 if (p->p_flag & P_TRACED) { 875 error = EBUSY; 876 goto fail; 877 } 878 879 /* Can't trace an ancestor if you're being traced. */ 880 if (curp->p_flag & P_TRACED) { 881 for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) { 882 if (pp == p) { 883 error = EINVAL; 884 goto fail; 885 } 886 } 887 } 888 889 890 /* OK */ 891 break; 892 893 case PT_CLEARSTEP: 894 /* Allow thread to clear single step for itself */ 895 if (td->td_tid == tid) 896 break; 897 898 /* FALLTHROUGH */ 899 default: 900 /* not being traced... */ 901 if ((p->p_flag & P_TRACED) == 0) { 902 error = EPERM; 903 goto fail; 904 } 905 906 /* not being traced by YOU */ 907 if (p->p_pptr != td->td_proc) { 908 error = EBUSY; 909 goto fail; 910 } 911 912 /* not currently stopped */ 913 if ((p->p_flag & P_STOPPED_TRACE) == 0 || 914 p->p_suspcount != p->p_numthreads || 915 (p->p_flag & P_WAITED) == 0) { 916 error = EBUSY; 917 goto fail; 918 } 919 920 /* OK */ 921 break; 922 } 923 924 /* Keep this process around until we finish this request. */ 925 _PHOLD(p); 926 927 #ifdef FIX_SSTEP 928 /* 929 * Single step fixup ala procfs 930 */ 931 FIX_SSTEP(td2); 932 #endif 933 934 /* 935 * Actually do the requests 936 */ 937 938 td->td_retval[0] = 0; 939 940 switch (req) { 941 case PT_TRACE_ME: 942 /* set my trace flag and "owner" so it can read/write me */ 943 proc_set_traced(p, false); 944 if (p->p_flag & P_PPWAIT) 945 p->p_flag |= P_PPTRACE; 946 CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid); 947 break; 948 949 case PT_ATTACH: 950 /* security check done above */ 951 /* 952 * It would be nice if the tracing relationship was separate 953 * from the parent relationship but that would require 954 * another set of links in the proc struct or for "wait" 955 * to scan the entire proc table. To make life easier, 956 * we just re-parent the process we're trying to trace. 957 * The old parent is remembered so we can put things back 958 * on a "detach". 959 */ 960 proc_set_traced(p, true); 961 if (p->p_pptr != td->td_proc) { 962 proc_reparent(p, td->td_proc, false); 963 } 964 CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid, 965 p->p_oppid); 966 967 sx_xunlock(&proctree_lock); 968 proctree_locked = 0; 969 MPASS(p->p_xthread == NULL); 970 MPASS((p->p_flag & P_STOPPED_TRACE) == 0); 971 972 /* 973 * If already stopped due to a stop signal, clear the 974 * existing stop before triggering a traced SIGSTOP. 975 */ 976 if ((p->p_flag & P_STOPPED_SIG) != 0) { 977 PROC_SLOCK(p); 978 p->p_flag &= ~(P_STOPPED_SIG | P_WAITED); 979 thread_unsuspend(p); 980 PROC_SUNLOCK(p); 981 } 982 983 kern_psignal(p, SIGSTOP); 984 break; 985 986 case PT_CLEARSTEP: 987 CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid, 988 p->p_pid); 989 error = ptrace_clear_single_step(td2); 990 break; 991 992 case PT_SETSTEP: 993 CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid, 994 p->p_pid); 995 error = ptrace_single_step(td2); 996 break; 997 998 case PT_SUSPEND: 999 CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid, 1000 p->p_pid); 1001 td2->td_dbgflags |= TDB_SUSPEND; 1002 thread_lock(td2); 1003 td2->td_flags |= TDF_NEEDSUSPCHK; 1004 thread_unlock(td2); 1005 break; 1006 1007 case PT_RESUME: 1008 CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid, 1009 p->p_pid); 1010 td2->td_dbgflags &= ~TDB_SUSPEND; 1011 break; 1012 1013 case PT_FOLLOW_FORK: 1014 CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid, 1015 p->p_ptevents & PTRACE_FORK ? "enabled" : "disabled", 1016 data ? "enabled" : "disabled"); 1017 if (data) 1018 p->p_ptevents |= PTRACE_FORK; 1019 else 1020 p->p_ptevents &= ~PTRACE_FORK; 1021 break; 1022 1023 case PT_LWP_EVENTS: 1024 CTR3(KTR_PTRACE, "PT_LWP_EVENTS: pid %d %s -> %s", p->p_pid, 1025 p->p_ptevents & PTRACE_LWP ? "enabled" : "disabled", 1026 data ? "enabled" : "disabled"); 1027 if (data) 1028 p->p_ptevents |= PTRACE_LWP; 1029 else 1030 p->p_ptevents &= ~PTRACE_LWP; 1031 break; 1032 1033 case PT_GET_EVENT_MASK: 1034 if (data != sizeof(p->p_ptevents)) { 1035 error = EINVAL; 1036 break; 1037 } 1038 CTR2(KTR_PTRACE, "PT_GET_EVENT_MASK: pid %d mask %#x", p->p_pid, 1039 p->p_ptevents); 1040 *(int *)addr = p->p_ptevents; 1041 break; 1042 1043 case PT_SET_EVENT_MASK: 1044 if (data != sizeof(p->p_ptevents)) { 1045 error = EINVAL; 1046 break; 1047 } 1048 tmp = *(int *)addr; 1049 if ((tmp & ~(PTRACE_EXEC | PTRACE_SCE | PTRACE_SCX | 1050 PTRACE_FORK | PTRACE_LWP | PTRACE_VFORK)) != 0) { 1051 error = EINVAL; 1052 break; 1053 } 1054 CTR3(KTR_PTRACE, "PT_SET_EVENT_MASK: pid %d mask %#x -> %#x", 1055 p->p_pid, p->p_ptevents, tmp); 1056 p->p_ptevents = tmp; 1057 break; 1058 1059 case PT_GET_SC_ARGS: 1060 CTR1(KTR_PTRACE, "PT_GET_SC_ARGS: pid %d", p->p_pid); 1061 if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) == 0 1062 #ifdef COMPAT_FREEBSD32 1063 || (wrap32 && !safe) 1064 #endif 1065 ) { 1066 error = EINVAL; 1067 break; 1068 } 1069 bzero(addr, sizeof(td2->td_sa.args)); 1070 #ifdef COMPAT_FREEBSD32 1071 if (wrap32) 1072 for (num = 0; num < nitems(td2->td_sa.args); num++) 1073 ((uint32_t *)addr)[num] = (uint32_t) 1074 td2->td_sa.args[num]; 1075 else 1076 #endif 1077 bcopy(td2->td_sa.args, addr, td2->td_sa.narg * 1078 sizeof(register_t)); 1079 break; 1080 1081 case PT_GET_SC_RET: 1082 if ((td2->td_dbgflags & (TDB_SCX)) == 0 1083 #ifdef COMPAT_FREEBSD32 1084 || (wrap32 && !safe) 1085 #endif 1086 ) { 1087 error = EINVAL; 1088 break; 1089 } 1090 #ifdef COMPAT_FREEBSD32 1091 if (wrap32) { 1092 psr = &r.psr; 1093 psr32 = addr; 1094 } else 1095 #endif 1096 psr = addr; 1097 bzero(psr, sizeof(*psr)); 1098 psr->sr_error = td2->td_errno; 1099 if (psr->sr_error == 0) { 1100 psr->sr_retval[0] = td2->td_retval[0]; 1101 psr->sr_retval[1] = td2->td_retval[1]; 1102 } 1103 #ifdef COMPAT_FREEBSD32 1104 if (wrap32) 1105 ptrace_sc_ret_to32(psr, psr32); 1106 #endif 1107 CTR4(KTR_PTRACE, 1108 "PT_GET_SC_RET: pid %d error %d retval %#lx,%#lx", 1109 p->p_pid, psr->sr_error, psr->sr_retval[0], 1110 psr->sr_retval[1]); 1111 break; 1112 1113 case PT_STEP: 1114 case PT_CONTINUE: 1115 case PT_TO_SCE: 1116 case PT_TO_SCX: 1117 case PT_SYSCALL: 1118 case PT_DETACH: 1119 /* Zero means do not send any signal */ 1120 if (data < 0 || data > _SIG_MAXSIG) { 1121 error = EINVAL; 1122 break; 1123 } 1124 1125 switch (req) { 1126 case PT_STEP: 1127 CTR3(KTR_PTRACE, "PT_STEP: tid %d (pid %d), sig = %d", 1128 td2->td_tid, p->p_pid, data); 1129 error = ptrace_single_step(td2); 1130 if (error) 1131 goto out; 1132 break; 1133 case PT_CONTINUE: 1134 case PT_TO_SCE: 1135 case PT_TO_SCX: 1136 case PT_SYSCALL: 1137 if (addr != (void *)1) { 1138 error = ptrace_set_pc(td2, 1139 (u_long)(uintfptr_t)addr); 1140 if (error) 1141 goto out; 1142 } 1143 switch (req) { 1144 case PT_TO_SCE: 1145 p->p_ptevents |= PTRACE_SCE; 1146 CTR4(KTR_PTRACE, 1147 "PT_TO_SCE: pid %d, events = %#x, PC = %#lx, sig = %d", 1148 p->p_pid, p->p_ptevents, 1149 (u_long)(uintfptr_t)addr, data); 1150 break; 1151 case PT_TO_SCX: 1152 p->p_ptevents |= PTRACE_SCX; 1153 CTR4(KTR_PTRACE, 1154 "PT_TO_SCX: pid %d, events = %#x, PC = %#lx, sig = %d", 1155 p->p_pid, p->p_ptevents, 1156 (u_long)(uintfptr_t)addr, data); 1157 break; 1158 case PT_SYSCALL: 1159 p->p_ptevents |= PTRACE_SYSCALL; 1160 CTR4(KTR_PTRACE, 1161 "PT_SYSCALL: pid %d, events = %#x, PC = %#lx, sig = %d", 1162 p->p_pid, p->p_ptevents, 1163 (u_long)(uintfptr_t)addr, data); 1164 break; 1165 case PT_CONTINUE: 1166 CTR3(KTR_PTRACE, 1167 "PT_CONTINUE: pid %d, PC = %#lx, sig = %d", 1168 p->p_pid, (u_long)(uintfptr_t)addr, data); 1169 break; 1170 } 1171 break; 1172 case PT_DETACH: 1173 /* 1174 * Reset the process parent. 1175 * 1176 * NB: This clears P_TRACED before reparenting 1177 * a detached process back to its original 1178 * parent. Otherwise the debugee will be set 1179 * as an orphan of the debugger. 1180 */ 1181 p->p_flag &= ~(P_TRACED | P_WAITED); 1182 if (p->p_oppid != p->p_pptr->p_pid) { 1183 PROC_LOCK(p->p_pptr); 1184 sigqueue_take(p->p_ksi); 1185 PROC_UNLOCK(p->p_pptr); 1186 1187 pp = proc_realparent(p); 1188 proc_reparent(p, pp, false); 1189 if (pp == initproc) 1190 p->p_sigparent = SIGCHLD; 1191 CTR3(KTR_PTRACE, 1192 "PT_DETACH: pid %d reparented to pid %d, sig %d", 1193 p->p_pid, pp->p_pid, data); 1194 } else 1195 CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d", 1196 p->p_pid, data); 1197 p->p_ptevents = 0; 1198 FOREACH_THREAD_IN_PROC(p, td3) { 1199 if ((td3->td_dbgflags & TDB_FSTP) != 0) { 1200 sigqueue_delete(&td3->td_sigqueue, 1201 SIGSTOP); 1202 } 1203 td3->td_dbgflags &= ~(TDB_XSIG | TDB_FSTP | 1204 TDB_SUSPEND); 1205 } 1206 1207 if ((p->p_flag2 & P2_PTRACE_FSTP) != 0) { 1208 sigqueue_delete(&p->p_sigqueue, SIGSTOP); 1209 p->p_flag2 &= ~P2_PTRACE_FSTP; 1210 } 1211 1212 /* should we send SIGCHLD? */ 1213 /* childproc_continued(p); */ 1214 break; 1215 } 1216 1217 sx_xunlock(&proctree_lock); 1218 proctree_locked = 0; 1219 1220 sendsig: 1221 MPASS(proctree_locked == 0); 1222 1223 /* 1224 * Clear the pending event for the thread that just 1225 * reported its event (p_xthread). This may not be 1226 * the thread passed to PT_CONTINUE, PT_STEP, etc. if 1227 * the debugger is resuming a different thread. 1228 * 1229 * Deliver any pending signal via the reporting thread. 1230 */ 1231 MPASS(p->p_xthread != NULL); 1232 p->p_xthread->td_dbgflags &= ~TDB_XSIG; 1233 p->p_xthread->td_xsig = data; 1234 p->p_xthread = NULL; 1235 p->p_xsig = data; 1236 1237 /* 1238 * P_WKILLED is insurance that a PT_KILL/SIGKILL 1239 * always works immediately, even if another thread is 1240 * unsuspended first and attempts to handle a 1241 * different signal or if the POSIX.1b style signal 1242 * queue cannot accommodate any new signals. 1243 */ 1244 if (data == SIGKILL) 1245 proc_wkilled(p); 1246 1247 /* 1248 * Unsuspend all threads. To leave a thread 1249 * suspended, use PT_SUSPEND to suspend it before 1250 * continuing the process. 1251 */ 1252 PROC_SLOCK(p); 1253 p->p_flag &= ~(P_STOPPED_TRACE | P_STOPPED_SIG | P_WAITED); 1254 thread_unsuspend(p); 1255 PROC_SUNLOCK(p); 1256 break; 1257 1258 case PT_WRITE_I: 1259 case PT_WRITE_D: 1260 td2->td_dbgflags |= TDB_USERWR; 1261 PROC_UNLOCK(p); 1262 error = 0; 1263 if (proc_writemem(td, p, (off_t)(uintptr_t)addr, &data, 1264 sizeof(int)) != sizeof(int)) 1265 error = ENOMEM; 1266 else 1267 CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x", 1268 p->p_pid, addr, data); 1269 PROC_LOCK(p); 1270 break; 1271 1272 case PT_READ_I: 1273 case PT_READ_D: 1274 PROC_UNLOCK(p); 1275 error = tmp = 0; 1276 if (proc_readmem(td, p, (off_t)(uintptr_t)addr, &tmp, 1277 sizeof(int)) != sizeof(int)) 1278 error = ENOMEM; 1279 else 1280 CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x", 1281 p->p_pid, addr, tmp); 1282 td->td_retval[0] = tmp; 1283 PROC_LOCK(p); 1284 break; 1285 1286 case PT_IO: 1287 #ifdef COMPAT_FREEBSD32 1288 if (wrap32) { 1289 piod32 = addr; 1290 iov.iov_base = (void *)(uintptr_t)piod32->piod_addr; 1291 iov.iov_len = piod32->piod_len; 1292 uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs; 1293 uio.uio_resid = piod32->piod_len; 1294 } else 1295 #endif 1296 { 1297 piod = addr; 1298 iov.iov_base = piod->piod_addr; 1299 iov.iov_len = piod->piod_len; 1300 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs; 1301 uio.uio_resid = piod->piod_len; 1302 } 1303 uio.uio_iov = &iov; 1304 uio.uio_iovcnt = 1; 1305 uio.uio_segflg = UIO_USERSPACE; 1306 uio.uio_td = td; 1307 #ifdef COMPAT_FREEBSD32 1308 tmp = wrap32 ? piod32->piod_op : piod->piod_op; 1309 #else 1310 tmp = piod->piod_op; 1311 #endif 1312 switch (tmp) { 1313 case PIOD_READ_D: 1314 case PIOD_READ_I: 1315 CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)", 1316 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1317 uio.uio_rw = UIO_READ; 1318 break; 1319 case PIOD_WRITE_D: 1320 case PIOD_WRITE_I: 1321 CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)", 1322 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1323 td2->td_dbgflags |= TDB_USERWR; 1324 uio.uio_rw = UIO_WRITE; 1325 break; 1326 default: 1327 error = EINVAL; 1328 goto out; 1329 } 1330 PROC_UNLOCK(p); 1331 error = proc_rwmem(p, &uio); 1332 #ifdef COMPAT_FREEBSD32 1333 if (wrap32) 1334 piod32->piod_len -= uio.uio_resid; 1335 else 1336 #endif 1337 piod->piod_len -= uio.uio_resid; 1338 PROC_LOCK(p); 1339 break; 1340 1341 case PT_KILL: 1342 CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid); 1343 data = SIGKILL; 1344 goto sendsig; /* in PT_CONTINUE above */ 1345 1346 case PT_SETREGS: 1347 CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid, 1348 p->p_pid); 1349 td2->td_dbgflags |= TDB_USERWR; 1350 error = PROC_WRITE(regs, td2, addr); 1351 break; 1352 1353 case PT_GETREGS: 1354 CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid, 1355 p->p_pid); 1356 error = PROC_READ(regs, td2, addr); 1357 break; 1358 1359 case PT_SETFPREGS: 1360 CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid, 1361 p->p_pid); 1362 td2->td_dbgflags |= TDB_USERWR; 1363 error = PROC_WRITE(fpregs, td2, addr); 1364 break; 1365 1366 case PT_GETFPREGS: 1367 CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid, 1368 p->p_pid); 1369 error = PROC_READ(fpregs, td2, addr); 1370 break; 1371 1372 case PT_SETDBREGS: 1373 CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid, 1374 p->p_pid); 1375 td2->td_dbgflags |= TDB_USERWR; 1376 error = PROC_WRITE(dbregs, td2, addr); 1377 break; 1378 1379 case PT_GETDBREGS: 1380 CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid, 1381 p->p_pid); 1382 error = PROC_READ(dbregs, td2, addr); 1383 break; 1384 1385 case PT_LWPINFO: 1386 if (data <= 0 || 1387 #ifdef COMPAT_FREEBSD32 1388 (!wrap32 && data > sizeof(*pl)) || 1389 (wrap32 && data > sizeof(*pl32))) { 1390 #else 1391 data > sizeof(*pl)) { 1392 #endif 1393 error = EINVAL; 1394 break; 1395 } 1396 #ifdef COMPAT_FREEBSD32 1397 if (wrap32) { 1398 pl = &r.pl; 1399 pl32 = addr; 1400 } else 1401 #endif 1402 pl = addr; 1403 bzero(pl, sizeof(*pl)); 1404 pl->pl_lwpid = td2->td_tid; 1405 pl->pl_event = PL_EVENT_NONE; 1406 pl->pl_flags = 0; 1407 if (td2->td_dbgflags & TDB_XSIG) { 1408 pl->pl_event = PL_EVENT_SIGNAL; 1409 if (td2->td_si.si_signo != 0 && 1410 #ifdef COMPAT_FREEBSD32 1411 ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo, 1412 pl_siginfo) + sizeof(pl->pl_siginfo)) || 1413 (wrap32 && data >= offsetof(struct ptrace_lwpinfo32, 1414 pl_siginfo) + sizeof(struct siginfo32))) 1415 #else 1416 data >= offsetof(struct ptrace_lwpinfo, pl_siginfo) 1417 + sizeof(pl->pl_siginfo) 1418 #endif 1419 ){ 1420 pl->pl_flags |= PL_FLAG_SI; 1421 pl->pl_siginfo = td2->td_si; 1422 } 1423 } 1424 if (td2->td_dbgflags & TDB_SCE) 1425 pl->pl_flags |= PL_FLAG_SCE; 1426 else if (td2->td_dbgflags & TDB_SCX) 1427 pl->pl_flags |= PL_FLAG_SCX; 1428 if (td2->td_dbgflags & TDB_EXEC) 1429 pl->pl_flags |= PL_FLAG_EXEC; 1430 if (td2->td_dbgflags & TDB_FORK) { 1431 pl->pl_flags |= PL_FLAG_FORKED; 1432 pl->pl_child_pid = td2->td_dbg_forked; 1433 if (td2->td_dbgflags & TDB_VFORK) 1434 pl->pl_flags |= PL_FLAG_VFORKED; 1435 } else if ((td2->td_dbgflags & (TDB_SCX | TDB_VFORK)) == 1436 TDB_VFORK) 1437 pl->pl_flags |= PL_FLAG_VFORK_DONE; 1438 if (td2->td_dbgflags & TDB_CHILD) 1439 pl->pl_flags |= PL_FLAG_CHILD; 1440 if (td2->td_dbgflags & TDB_BORN) 1441 pl->pl_flags |= PL_FLAG_BORN; 1442 if (td2->td_dbgflags & TDB_EXIT) 1443 pl->pl_flags |= PL_FLAG_EXITED; 1444 pl->pl_sigmask = td2->td_sigmask; 1445 pl->pl_siglist = td2->td_siglist; 1446 strcpy(pl->pl_tdname, td2->td_name); 1447 if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) { 1448 pl->pl_syscall_code = td2->td_sa.code; 1449 pl->pl_syscall_narg = td2->td_sa.narg; 1450 } else { 1451 pl->pl_syscall_code = 0; 1452 pl->pl_syscall_narg = 0; 1453 } 1454 #ifdef COMPAT_FREEBSD32 1455 if (wrap32) 1456 ptrace_lwpinfo_to32(pl, pl32); 1457 #endif 1458 CTR6(KTR_PTRACE, 1459 "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d", 1460 td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags, 1461 pl->pl_child_pid, pl->pl_syscall_code); 1462 break; 1463 1464 case PT_GETNUMLWPS: 1465 CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid, 1466 p->p_numthreads); 1467 td->td_retval[0] = p->p_numthreads; 1468 break; 1469 1470 case PT_GETLWPLIST: 1471 CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d", 1472 p->p_pid, data, p->p_numthreads); 1473 if (data <= 0) { 1474 error = EINVAL; 1475 break; 1476 } 1477 num = imin(p->p_numthreads, data); 1478 PROC_UNLOCK(p); 1479 buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK); 1480 tmp = 0; 1481 PROC_LOCK(p); 1482 FOREACH_THREAD_IN_PROC(p, td2) { 1483 if (tmp >= num) 1484 break; 1485 buf[tmp++] = td2->td_tid; 1486 } 1487 PROC_UNLOCK(p); 1488 error = copyout(buf, addr, tmp * sizeof(lwpid_t)); 1489 free(buf, M_TEMP); 1490 if (!error) 1491 td->td_retval[0] = tmp; 1492 PROC_LOCK(p); 1493 break; 1494 1495 case PT_VM_TIMESTAMP: 1496 CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d", 1497 p->p_pid, p->p_vmspace->vm_map.timestamp); 1498 td->td_retval[0] = p->p_vmspace->vm_map.timestamp; 1499 break; 1500 1501 case PT_VM_ENTRY: 1502 PROC_UNLOCK(p); 1503 #ifdef COMPAT_FREEBSD32 1504 if (wrap32) 1505 error = ptrace_vm_entry32(td, p, addr); 1506 else 1507 #endif 1508 error = ptrace_vm_entry(td, p, addr); 1509 PROC_LOCK(p); 1510 break; 1511 1512 default: 1513 #ifdef __HAVE_PTRACE_MACHDEP 1514 if (req >= PT_FIRSTMACH) { 1515 PROC_UNLOCK(p); 1516 error = cpu_ptrace(td2, req, addr, data); 1517 PROC_LOCK(p); 1518 } else 1519 #endif 1520 /* Unknown request. */ 1521 error = EINVAL; 1522 break; 1523 } 1524 1525 out: 1526 /* Drop our hold on this process now that the request has completed. */ 1527 _PRELE(p); 1528 fail: 1529 PROC_UNLOCK(p); 1530 if (proctree_locked) 1531 sx_xunlock(&proctree_lock); 1532 return (error); 1533 } 1534 #undef PROC_READ 1535 #undef PROC_WRITE 1536 1537 /* 1538 * Stop a process because of a debugging event; 1539 * stay stopped until p->p_step is cleared 1540 * (cleared by PIOCCONT in procfs). 1541 */ 1542 void 1543 stopevent(struct proc *p, unsigned int event, unsigned int val) 1544 { 1545 1546 PROC_LOCK_ASSERT(p, MA_OWNED); 1547 p->p_step = 1; 1548 CTR3(KTR_PTRACE, "stopevent: pid %d event %u val %u", p->p_pid, event, 1549 val); 1550 do { 1551 if (event != S_EXIT) 1552 p->p_xsig = val; 1553 p->p_xthread = NULL; 1554 p->p_stype = event; /* Which event caused the stop? */ 1555 wakeup(&p->p_stype); /* Wake up any PIOCWAIT'ing procs */ 1556 msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0); 1557 } while (p->p_step); 1558 } 1559