1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 1994, Sean Eric Fagan 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Sean Eric Fagan. 18 * 4. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/ktr.h> 40 #include <sys/limits.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/syscallsubr.h> 44 #include <sys/sysent.h> 45 #include <sys/sysproto.h> 46 #include <sys/priv.h> 47 #include <sys/proc.h> 48 #include <sys/vnode.h> 49 #include <sys/ptrace.h> 50 #include <sys/rwlock.h> 51 #include <sys/sx.h> 52 #include <sys/malloc.h> 53 #include <sys/signalvar.h> 54 55 #include <machine/reg.h> 56 57 #include <security/audit/audit.h> 58 59 #include <vm/vm.h> 60 #include <vm/pmap.h> 61 #include <vm/vm_extern.h> 62 #include <vm/vm_map.h> 63 #include <vm/vm_kern.h> 64 #include <vm/vm_object.h> 65 #include <vm/vm_page.h> 66 #include <vm/vm_param.h> 67 68 #ifdef COMPAT_FREEBSD32 69 #include <sys/procfs.h> 70 #endif 71 72 /* 73 * Functions implemented using PROC_ACTION(): 74 * 75 * proc_read_regs(proc, regs) 76 * Get the current user-visible register set from the process 77 * and copy it into the regs structure (<machine/reg.h>). 78 * The process is stopped at the time read_regs is called. 79 * 80 * proc_write_regs(proc, regs) 81 * Update the current register set from the passed in regs 82 * structure. Take care to avoid clobbering special CPU 83 * registers or privileged bits in the PSL. 84 * Depending on the architecture this may have fix-up work to do, 85 * especially if the IAR or PCW are modified. 86 * The process is stopped at the time write_regs is called. 87 * 88 * proc_read_fpregs, proc_write_fpregs 89 * deal with the floating point register set, otherwise as above. 90 * 91 * proc_read_dbregs, proc_write_dbregs 92 * deal with the processor debug register set, otherwise as above. 93 * 94 * proc_sstep(proc) 95 * Arrange for the process to trap after executing a single instruction. 
 */

/*
 * PROC_ACTION() wraps each machine-dependent register accessor below:
 * the caller must hold the target process lock; if the process is not
 * resident in memory (P_INMEM clear) the request fails with EIO,
 * otherwise the MD action supplies the return value.  Note that the
 * macro expands a `return', so it must be the last statement of each
 * wrapper.
 */
#define PROC_ACTION(action) do {                \
    int error;                                  \
                                                \
    PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);    \
    if ((td->td_proc->p_flag & P_INMEM) == 0)   \
        error = EIO;                            \
    else                                        \
        error = (action);                       \
    return (error);                             \
} while(0)

/* Read the stopped thread's general registers into *regs. */
int
proc_read_regs(struct thread *td, struct reg *regs)
{

    PROC_ACTION(fill_regs(td, regs));
}

/* Load *regs into the stopped thread's general registers. */
int
proc_write_regs(struct thread *td, struct reg *regs)
{

    PROC_ACTION(set_regs(td, regs));
}

/* Read the stopped thread's debug registers into *dbregs. */
int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

    PROC_ACTION(fill_dbregs(td, dbregs));
}

/* Load *dbregs into the stopped thread's debug registers. */
int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

    PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

    PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

    PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts.
 */
/* 32-bit layout analogues of the register accessors above. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

    PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

    PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

    PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

    PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

    PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

    PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

/* Arrange for the stopped thread to trap after a single instruction. */
int
proc_sstep(struct thread *td)
{

    PROC_ACTION(ptrace_single_step(td));
}

/*
 * Transfer data between the address space of process `p' and the
 * debugger-supplied uio, faulting pages in one at a time.  Returns 0
 * or an errno; a partial transfer leaves uio->uio_resid non-zero.
 * Caller must hold a reference on `p' (PHOLD) but not its lock.
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
    vm_map_t map;
    vm_offset_t pageno;     /* page number */
    vm_prot_t reqprot;
    int error, fault_flags, page_offset, writing;

    /*
     * Assert that someone has locked this vmspace.  (Should be
     * curthread but we can't assert that.)  This keeps the process
     * from exiting out from under us until this operation completes.
     */
    PROC_ASSERT_HELD(p);
    PROC_LOCK_ASSERT(p, MA_NOTOWNED);

    /*
     * The map we want...
     */
    map = &p->p_vmspace->vm_map;

    /*
     * If we are writing, then we request vm_fault() to create a private
     * copy of each page.  Since these copies will not be writeable by the
     * process, we must explicity request that they be dirtied.
     */
    writing = uio->uio_rw == UIO_WRITE;
    reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
    fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

    /*
     * Only map in one page at a time.  We don't have to, but it
     * makes things easier.
     * This way is trivial - right?
     */
    do {
        vm_offset_t uva;
        u_int len;
        vm_page_t m;

        uva = (vm_offset_t)uio->uio_offset;

        /*
         * Get the page number of this segment.
         */
        pageno = trunc_page(uva);
        page_offset = uva - pageno;

        /*
         * How many bytes to copy (clipped to the end of this page).
         */
        len = min(PAGE_SIZE - page_offset, uio->uio_resid);

        /*
         * Fault and hold the page on behalf of the process.
         */
        error = vm_fault(map, pageno, reqprot, fault_flags, &m);
        if (error != KERN_SUCCESS) {
            if (error == KERN_RESOURCE_SHORTAGE)
                error = ENOMEM;
            else
                error = EFAULT;
            break;
        }

        /*
         * Now do the i/o move.
         */
        error = uiomove_fromphys(&m, page_offset, len, uio);

        /* Make the I-cache coherent for breakpoints. */
        if (writing && error == 0) {
            vm_map_lock_read(map);
            if (vm_map_check_protection(map, pageno, pageno +
                PAGE_SIZE, VM_PROT_EXECUTE))
                vm_sync_icache(map, uva, len);
            vm_map_unlock_read(map);
        }

        /*
         * Release the page.
         */
        vm_page_unwire(m, PQ_ACTIVE);

    } while (error == 0 && uio->uio_resid > 0);

    return (error);
}

/*
 * Common helper for proc_readmem()/proc_writemem(): wrap (buf, len)
 * in a kernel-space uio and hand it to proc_rwmem().  Returns the
 * number of bytes actually moved, or -1 if nothing was transferred.
 */
static ssize_t
proc_iop(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len, enum uio_rw rw)
{
    struct iovec iov;
    struct uio uio;
    ssize_t slen;

    MPASS(len < SSIZE_MAX);
    slen = (ssize_t)len;

    iov.iov_base = (caddr_t)buf;
    iov.iov_len = len;
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = va;
    uio.uio_resid = slen;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = rw;
    uio.uio_td = td;
    proc_rwmem(p, &uio);
    /* Distinguish "nothing moved" (-1) from a short transfer. */
    if (uio.uio_resid == slen)
        return (-1);
    return (slen - uio.uio_resid);
}

/* Read up to `len' bytes at `va' in process `p' into `buf'. */
ssize_t
proc_readmem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len)
{

    return (proc_iop(td, p, va, buf, len, UIO_READ));
}

/* Write up to `len' bytes from `buf' to `va' in process `p'. */
ssize_t
proc_writemem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len)
{

    return (proc_iop(td, p, va, buf, len, UIO_WRITE));
}

/*
 * PT_VM_ENTRY backend: locate the pve_entry'th non-submap entry of the
 * target's VM map, describe it in *pve and, when a path buffer was
 * supplied, copy out the path of the backing vnode.
 */
static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
    struct vattr vattr;
    vm_map_t map;
    vm_map_entry_t entry;
    vm_object_t obj, tobj, lobj;
    struct vmspace *vm;
    struct vnode *vp;
    char *freepath, *fullpath;
    u_int pathlen;
    int error, index;

    error = 0;
    obj = NULL;

    vm = vmspace_acquire_ref(p);
    map = &vm->vm_map;
    vm_map_lock_read(map);

    do {
        KASSERT((map->header.eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
            ("Submap in map header"));
        index = 0;
        VM_MAP_ENTRY_FOREACH(entry, map) {
            if (index >= pve->pve_entry &&
                (entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
                break;
            index++;
        }
        if (index < pve->pve_entry) {
            error = EINVAL;
            break;
        }
        if (entry == &map->header) {
            error = ENOENT;
            break;
        }

        /* We got an entry.
 */
        pve->pve_entry = index + 1;
        pve->pve_timestamp = map->timestamp;
        pve->pve_start = entry->start;
        pve->pve_end = entry->end - 1;
        pve->pve_offset = entry->offset;
        pve->pve_prot = entry->protection;

        /* Backing object's path needed? */
        if (pve->pve_pathlen == 0)
            break;

        pathlen = pve->pve_pathlen;
        pve->pve_pathlen = 0;

        /* Keep the top object locked across the map unlock below. */
        obj = entry->object.vm_object;
        if (obj != NULL)
            VM_OBJECT_RLOCK(obj);
    } while (0);

    vm_map_unlock_read(map);

    pve->pve_fsid = VNOVAL;
    pve->pve_fileid = VNOVAL;

    if (error == 0 && obj != NULL) {
        lobj = obj;
        /*
         * Walk to the bottom of the shadow chain hand-over-hand,
         * accumulating the offset into the backing object.
         */
        for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
            if (tobj != obj)
                VM_OBJECT_RLOCK(tobj);
            if (lobj != obj)
                VM_OBJECT_RUNLOCK(lobj);
            lobj = tobj;
            pve->pve_offset += tobj->backing_object_offset;
        }
        vp = vm_object_vnode(lobj);
        if (vp != NULL)
            vref(vp);
        if (lobj != obj)
            VM_OBJECT_RUNLOCK(lobj);
        VM_OBJECT_RUNLOCK(obj);

        if (vp != NULL) {
            freepath = NULL;
            fullpath = NULL;
            vn_fullpath(vp, &fullpath, &freepath);
            vn_lock(vp, LK_SHARED | LK_RETRY);
            if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
                pve->pve_fileid = vattr.va_fileid;
                pve->pve_fsid = vattr.va_fsid;
            }
            vput(vp);

            if (fullpath != NULL) {
                pve->pve_pathlen = strlen(fullpath) + 1;
                if (pve->pve_pathlen <= pathlen) {
                    error = copyout(fullpath, pve->pve_path,
                        pve->pve_pathlen);
                } else
                    error = ENAMETOOLONG;
            }
            if (freepath != NULL)
                free(freepath, M_TEMP);
        }
    }
    vmspace_free(vm);
    if (error == 0)
        CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p",
            p->p_pid, pve->pve_entry, pve->pve_start);

    return (error);
}

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
    int req;
    pid_t pid;
    caddr_t addr;
    int data;
};
#endif

/*
 * ptrace(2) entry point: stage user arguments into a kernel-side
 * buffer, call kern_ptrace(), then copy results back out for the
 * requests that return data through `addr'.
 */
int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
    /*
     * XXX this obfuscation is to reduce stack usage, but the register
     * structs may be too large to put on the stack anyway.
     */
    union {
        struct ptrace_io_desc piod;
        struct ptrace_lwpinfo pl;
        struct ptrace_vm_entry pve;
        struct dbreg dbreg;
        struct fpreg fpreg;
        struct reg reg;
        char args[sizeof(td->td_sa.args)];
        struct ptrace_sc_ret psr;
        int ptevents;
    } r;
    void *addr;
    int error = 0;

    AUDIT_ARG_PID(uap->pid);
    AUDIT_ARG_CMD(uap->req);
    AUDIT_ARG_VALUE(uap->data);
    addr = &r;
    /* Copy in (or zero) the kernel-side argument buffer as needed. */
    switch (uap->req) {
    case PT_GET_EVENT_MASK:
    case PT_LWPINFO:
    case PT_GET_SC_ARGS:
    case PT_GET_SC_RET:
        break;
    case PT_GETREGS:
        bzero(&r.reg, sizeof(r.reg));
        break;
    case PT_GETFPREGS:
        bzero(&r.fpreg, sizeof(r.fpreg));
        break;
    case PT_GETDBREGS:
        bzero(&r.dbreg, sizeof(r.dbreg));
        break;
    case PT_SETREGS:
        error = copyin(uap->addr, &r.reg, sizeof(r.reg));
        break;
    case PT_SETFPREGS:
        error = copyin(uap->addr, &r.fpreg, sizeof(r.fpreg));
        break;
    case PT_SETDBREGS:
        error = copyin(uap->addr, &r.dbreg, sizeof(r.dbreg));
        break;
    case PT_SET_EVENT_MASK:
        if (uap->data != sizeof(r.ptevents))
            error = EINVAL;
        else
            error = copyin(uap->addr, &r.ptevents, uap->data);
        break;
    case PT_IO:
        error = copyin(uap->addr, &r.piod, sizeof(r.piod));
        break;
    case PT_VM_ENTRY:
        error = copyin(uap->addr, &r.pve, sizeof(r.pve));
        break;
    default:
        /* All other requests pass the user pointer through as-is. */
        addr = uap->addr;
        break;
    }
    if (error)
        return (error);

    error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
    if (error)
        return (error);

    /* Copy results back out for the requests that return data. */
    switch (uap->req) {
    case PT_VM_ENTRY:
        error = copyout(&r.pve, uap->addr, sizeof(r.pve));
        break;
    case PT_IO:
        error = copyout(&r.piod, uap->addr, sizeof(r.piod));
        break;
    case PT_GETREGS:
        error = copyout(&r.reg, uap->addr, sizeof(r.reg));
        break;
    case PT_GETFPREGS:
        error = copyout(&r.fpreg, uap->addr, sizeof(r.fpreg));
        break;
    case PT_GETDBREGS:
        error = copyout(&r.dbreg, uap->addr, sizeof(r.dbreg));
        break;
    case PT_GET_EVENT_MASK:
        /* NB: The size in uap->data is validated in kern_ptrace(). */
        error = copyout(&r.ptevents, uap->addr, uap->data);
        break;
    case PT_LWPINFO:
        /* NB: The size in uap->data is validated in kern_ptrace(). */
        error = copyout(&r.pl, uap->addr, uap->data);
        break;
    case PT_GET_SC_ARGS:
        error = copyout(r.args, uap->addr, MIN(uap->data,
            sizeof(r.args)));
        break;
    case PT_GET_SC_RET:
        error = copyout(&r.psr, uap->addr, MIN(uap->data,
            sizeof(r.psr)));
        break;
    }

    return (error);
}

#ifdef COMPAT_FREEBSD32
/*
 * PROC_READ(regs, td2, addr);
 * becomes either:
 * proc_read_regs(td2, addr);
 * or
 * proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define PROC_READ(w, t, a)  wrap32 ? \
    proc_read_ ## w ## 32(t, a) : \
    proc_read_ ## w (t, a)
#define PROC_WRITE(w, t, a) wrap32 ? \
    (safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
    proc_write_ ## w (t, a)
#else
#define PROC_READ(w, t, a)  proc_read_ ## w (t, a)
#define PROC_WRITE(w, t, a) proc_write_ ## w (t, a)
#endif

/*
 * Mark process `p' as traced; with `stop' set, also request the
 * synchronous-stop handshake (P2_PTRACE_FSTP) used by PT_ATTACH.
 * Caller holds both the proctree lock and the process lock.
 */
void
proc_set_traced(struct proc *p, bool stop)
{

    sx_assert(&proctree_lock, SX_XLOCKED);
    PROC_LOCK_ASSERT(p, MA_OWNED);
    p->p_flag |= P_TRACED;
    if (stop)
        p->p_flag2 |= P2_PTRACE_FSTP;
    p->p_ptevents = PTRACE_DEFAULT;
}

/*
 * Kernel backend for ptrace(2).  `addr' is a kernel pointer for the
 * requests that sys_ptrace() staged into its union, and the raw user
 * pointer otherwise.  `pid' may be a thread id (> PID_MAX), in which
 * case the request is directed at that thread.  Returns an errno.
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
    struct iovec iov;
    struct uio uio;
    struct proc *curp, *p, *pp;
    struct thread *td2 = NULL, *td3;
    struct ptrace_io_desc *piod = NULL;
    struct ptrace_lwpinfo *pl;
    struct ptrace_sc_ret *psr;
    int error, num, tmp;
    int proctree_locked = 0;
    lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
    int wrap32 = 0, safe = 0;
#endif

    curp = td->td_proc;

    /* Lock proctree before locking the process. */
    switch (req) {
    case PT_TRACE_ME:
    case PT_ATTACH:
    case PT_STEP:
    case PT_CONTINUE:
    case PT_TO_SCE:
    case PT_TO_SCX:
    case PT_SYSCALL:
    case PT_FOLLOW_FORK:
    case PT_LWP_EVENTS:
    case PT_GET_EVENT_MASK:
    case PT_SET_EVENT_MASK:
    case PT_DETACH:
    case PT_GET_SC_ARGS:
        sx_xlock(&proctree_lock);
        proctree_locked = 1;
        break;
    default:
        break;
    }

    /* Resolve the target process (and possibly thread) and lock it. */
    if (req == PT_TRACE_ME) {
        p = td->td_proc;
        PROC_LOCK(p);
    } else {
        if (pid <= PID_MAX) {
            if ((p = pfind(pid)) == NULL) {
                if (proctree_locked)
                    sx_xunlock(&proctree_lock);
                return (ESRCH);
            }
        } else {
            /* pid above PID_MAX names a thread, not a process. */
            td2 = tdfind(pid, -1);
            if (td2 == NULL) {
                if (proctree_locked)
                    sx_xunlock(&proctree_lock);
                return (ESRCH);
            }
            p = td2->td_proc;
            tid = pid;
            pid = p->p_pid;
        }
    }
    AUDIT_ARG_PROCESS(p);

    if ((p->p_flag & P_WEXIT) != 0) {
        error = ESRCH;
        goto fail;
    }
    if ((error = p_cansee(td, p)) != 0)
        goto fail;

    if ((error = p_candebug(td, p)) != 0)
        goto fail;

    /*
     * System processes can't be debugged.
     */
    if ((p->p_flag & P_SYSTEM) != 0) {
        error = EINVAL;
        goto fail;
    }

    /* No explicit thread given: default to the reporting thread. */
    if (tid == 0) {
        if ((p->p_flag & P_STOPPED_TRACE) != 0) {
            KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
            td2 = p->p_xthread;
        } else {
            td2 = FIRST_THREAD_IN_PROC(p);
        }
        tid = td2->td_tid;
    }

#ifdef COMPAT_FREEBSD32
    /*
     * Test if we're a 32 bit client and what the target is.
     * Set the wrap controls accordingly.
     */
    if (SV_CURPROC_FLAG(SV_ILP32)) {
        if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
            safe = 1;
        wrap32 = 1;
    }
#endif
    /*
     * Permissions check
     */
    switch (req) {
    case PT_TRACE_ME:
        /*
         * Always legal, when there is a parent process which
         * could trace us.  Otherwise, reject.
         */
        if ((p->p_flag & P_TRACED) != 0) {
            error = EBUSY;
            goto fail;
        }
        if (p->p_pptr == initproc) {
            error = EPERM;
            goto fail;
        }
        break;

    case PT_ATTACH:
        /* Self */
        if (p == td->td_proc) {
            error = EINVAL;
            goto fail;
        }

        /* Already traced */
        if (p->p_flag & P_TRACED) {
            error = EBUSY;
            goto fail;
        }

        /* Can't trace an ancestor if you're being traced. */
        if (curp->p_flag & P_TRACED) {
            for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
                if (pp == p) {
                    error = EINVAL;
                    goto fail;
                }
            }
        }

        /* OK */
        break;

    case PT_CLEARSTEP:
        /* Allow thread to clear single step for itself */
        if (td->td_tid == tid)
            break;

        /* FALLTHROUGH */
    default:
        /* not being traced... */
        if ((p->p_flag & P_TRACED) == 0) {
            error = EPERM;
            goto fail;
        }

        /* not being traced by YOU */
        if (p->p_pptr != td->td_proc) {
            error = EBUSY;
            goto fail;
        }

        /* not currently stopped */
        if ((p->p_flag & P_STOPPED_TRACE) == 0 ||
            p->p_suspcount != p->p_numthreads ||
            (p->p_flag & P_WAITED) == 0) {
            error = EBUSY;
            goto fail;
        }

        /* OK */
        break;
    }

    /* Keep this process around until we finish this request. */
    _PHOLD(p);

#ifdef FIX_SSTEP
    /*
     * Single step fixup ala procfs
     */
    FIX_SSTEP(td2);
#endif

    /*
     * Actually do the requests
     */

    td->td_retval[0] = 0;

    switch (req) {
    case PT_TRACE_ME:
        /* set my trace flag and "owner" so it can read/write me */
        proc_set_traced(p, false);
        if (p->p_flag & P_PPWAIT)
            p->p_flag |= P_PPTRACE;
        CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid);
        break;

    case PT_ATTACH:
        /* security check done above */
        /*
         * It would be nice if the tracing relationship was separate
         * from the parent relationship but that would require
         * another set of links in the proc struct or for "wait"
         * to scan the entire proc table.  To make life easier,
         * we just re-parent the process we're trying to trace.
         * The old parent is remembered so we can put things back
         * on a "detach".
         */
        proc_set_traced(p, true);
        proc_reparent(p, td->td_proc, false);
        CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid,
            p->p_oppid);

        sx_xunlock(&proctree_lock);
        proctree_locked = 0;
        MPASS(p->p_xthread == NULL);
        MPASS((p->p_flag & P_STOPPED_TRACE) == 0);

        /*
         * If already stopped due to a stop signal, clear the
         * existing stop before triggering a traced SIGSTOP.
         */
        if ((p->p_flag & P_STOPPED_SIG) != 0) {
            PROC_SLOCK(p);
            p->p_flag &= ~(P_STOPPED_SIG | P_WAITED);
            thread_unsuspend(p);
            PROC_SUNLOCK(p);
        }

        kern_psignal(p, SIGSTOP);
        break;

    case PT_CLEARSTEP:
        CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        error = ptrace_clear_single_step(td2);
        break;

    case PT_SETSTEP:
        CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        error = ptrace_single_step(td2);
        break;

    case PT_SUSPEND:
        CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        td2->td_dbgflags |= TDB_SUSPEND;
        thread_lock(td2);
        td2->td_flags |= TDF_NEEDSUSPCHK;
        thread_unlock(td2);
        break;

    case PT_RESUME:
        CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        td2->td_dbgflags &= ~TDB_SUSPEND;
        break;

    case PT_FOLLOW_FORK:
        CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid,
            p->p_ptevents & PTRACE_FORK ? "enabled" : "disabled",
            data ? "enabled" : "disabled");
        if (data)
            p->p_ptevents |= PTRACE_FORK;
        else
            p->p_ptevents &= ~PTRACE_FORK;
        break;

    case PT_LWP_EVENTS:
        CTR3(KTR_PTRACE, "PT_LWP_EVENTS: pid %d %s -> %s", p->p_pid,
            p->p_ptevents & PTRACE_LWP ? "enabled" : "disabled",
            data ? "enabled" : "disabled");
        if (data)
            p->p_ptevents |= PTRACE_LWP;
        else
            p->p_ptevents &= ~PTRACE_LWP;
        break;

    case PT_GET_EVENT_MASK:
        if (data != sizeof(p->p_ptevents)) {
            error = EINVAL;
            break;
        }
        CTR2(KTR_PTRACE, "PT_GET_EVENT_MASK: pid %d mask %#x", p->p_pid,
            p->p_ptevents);
        *(int *)addr = p->p_ptevents;
        break;

    case PT_SET_EVENT_MASK:
        if (data != sizeof(p->p_ptevents)) {
            error = EINVAL;
            break;
        }
        tmp = *(int *)addr;
        if ((tmp & ~(PTRACE_EXEC | PTRACE_SCE | PTRACE_SCX |
            PTRACE_FORK | PTRACE_LWP | PTRACE_VFORK)) != 0) {
            error = EINVAL;
            break;
        }
        CTR3(KTR_PTRACE, "PT_SET_EVENT_MASK: pid %d mask %#x -> %#x",
            p->p_pid, p->p_ptevents, tmp);
        p->p_ptevents = tmp;
        break;

    case PT_GET_SC_ARGS:
        CTR1(KTR_PTRACE, "PT_GET_SC_ARGS: pid %d", p->p_pid);
        /* Only valid while stopped in syscall entry or exit. */
        if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) == 0
#ifdef COMPAT_FREEBSD32
            || (wrap32 && !safe)
#endif
            ) {
            error = EINVAL;
            break;
        }
        bzero(addr, sizeof(td2->td_sa.args));
        bcopy(td2->td_sa.args, addr, td2->td_sa.callp->sy_narg *
            sizeof(register_t));
        break;

    case PT_GET_SC_RET:
        /* Only valid while stopped in syscall exit. */
        if ((td2->td_dbgflags & (TDB_SCX)) == 0
#ifdef COMPAT_FREEBSD32
            || (wrap32 && !safe)
#endif
            ) {
            error = EINVAL;
            break;
        }
        psr = addr;
        bzero(psr, sizeof(*psr));
        psr->sr_error = td2->td_errno;
        if (psr->sr_error == 0) {
            psr->sr_retval[0] = td2->td_retval[0];
            psr->sr_retval[1] = td2->td_retval[1];
        }
        CTR4(KTR_PTRACE,
            "PT_GET_SC_RET: pid %d error %d retval %#lx,%#lx",
            p->p_pid, psr->sr_error, psr->sr_retval[0],
            psr->sr_retval[1]);
        break;

    case PT_STEP:
    case PT_CONTINUE:
    case PT_TO_SCE:
    case PT_TO_SCX:
    case PT_SYSCALL:
    case PT_DETACH:
        /* Zero means do not send any signal */
        if (data < 0 || data > _SIG_MAXSIG) {
            error = EINVAL;
            break;
        }

        switch (req) {
        case PT_STEP:
            CTR3(KTR_PTRACE, "PT_STEP: tid %d (pid %d), sig = %d",
                td2->td_tid, p->p_pid, data);
            error = ptrace_single_step(td2);
            if (error)
                goto out;
            break;
        case PT_CONTINUE:
        case PT_TO_SCE:
        case PT_TO_SCX:
        case PT_SYSCALL:
            /* addr == (void *)1 means "resume at current PC". */
            if (addr != (void *)1) {
                error = ptrace_set_pc(td2,
                    (u_long)(uintfptr_t)addr);
                if (error)
                    goto out;
            }
            switch (req) {
            case PT_TO_SCE:
                p->p_ptevents |= PTRACE_SCE;
                CTR4(KTR_PTRACE,
                    "PT_TO_SCE: pid %d, events = %#x, PC = %#lx, sig = %d",
                    p->p_pid, p->p_ptevents,
                    (u_long)(uintfptr_t)addr, data);
                break;
            case PT_TO_SCX:
                p->p_ptevents |= PTRACE_SCX;
                CTR4(KTR_PTRACE,
                    "PT_TO_SCX: pid %d, events = %#x, PC = %#lx, sig = %d",
                    p->p_pid, p->p_ptevents,
                    (u_long)(uintfptr_t)addr, data);
                break;
            case PT_SYSCALL:
                p->p_ptevents |= PTRACE_SYSCALL;
                CTR4(KTR_PTRACE,
                    "PT_SYSCALL: pid %d, events = %#x, PC = %#lx, sig = %d",
                    p->p_pid, p->p_ptevents,
                    (u_long)(uintfptr_t)addr, data);
                break;
            case PT_CONTINUE:
                CTR3(KTR_PTRACE,
                    "PT_CONTINUE: pid %d, PC = %#lx, sig = %d",
                    p->p_pid, (u_long)(uintfptr_t)addr, data);
                break;
            }
            break;
        case PT_DETACH:
            /*
             * Reset the process parent.
             *
             * NB: This clears P_TRACED before reparenting
             * a detached process back to its original
             * parent.  Otherwise the debugee will be set
             * as an orphan of the debugger.
             */
            p->p_flag &= ~(P_TRACED | P_WAITED);
            if (p->p_oppid != p->p_pptr->p_pid) {
                PROC_LOCK(p->p_pptr);
                sigqueue_take(p->p_ksi);
                PROC_UNLOCK(p->p_pptr);

                pp = proc_realparent(p);
                proc_reparent(p, pp, false);
                if (pp == initproc)
                    p->p_sigparent = SIGCHLD;
                CTR3(KTR_PTRACE,
                    "PT_DETACH: pid %d reparented to pid %d, sig %d",
                    p->p_pid, pp->p_pid, data);
            } else
                CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d",
                    p->p_pid, data);
            p->p_ptevents = 0;
            /* Discard any pending traced-stop state in each thread. */
            FOREACH_THREAD_IN_PROC(p, td3) {
                if ((td3->td_dbgflags & TDB_FSTP) != 0) {
                    sigqueue_delete(&td3->td_sigqueue,
                        SIGSTOP);
                }
                td3->td_dbgflags &= ~(TDB_XSIG | TDB_FSTP |
                    TDB_SUSPEND);
            }

            if ((p->p_flag2 & P2_PTRACE_FSTP) != 0) {
                sigqueue_delete(&p->p_sigqueue, SIGSTOP);
                p->p_flag2 &= ~P2_PTRACE_FSTP;
            }

            /* should we send SIGCHLD? */
            /* childproc_continued(p); */
            break;
        }

        sx_xunlock(&proctree_lock);
        proctree_locked = 0;

    sendsig:
        MPASS(proctree_locked == 0);

        /*
         * Clear the pending event for the thread that just
         * reported its event (p_xthread).  This may not be
         * the thread passed to PT_CONTINUE, PT_STEP, etc. if
         * the debugger is resuming a different thread.
         *
         * Deliver any pending signal via the reporting thread.
         */
        MPASS(p->p_xthread != NULL);
        p->p_xthread->td_dbgflags &= ~TDB_XSIG;
        p->p_xthread->td_xsig = data;
        p->p_xthread = NULL;
        p->p_xsig = data;

        /*
         * P_WKILLED is insurance that a PT_KILL/SIGKILL
         * always works immediately, even if another thread is
         * unsuspended first and attempts to handle a
         * different signal or if the POSIX.1b style signal
         * queue cannot accommodate any new signals.
         */
        if (data == SIGKILL)
            proc_wkilled(p);

        /*
         * Unsuspend all threads.  To leave a thread
         * suspended, use PT_SUSPEND to suspend it before
         * continuing the process.
         */
        PROC_SLOCK(p);
        p->p_flag &= ~(P_STOPPED_TRACE | P_STOPPED_SIG | P_WAITED);
        thread_unsuspend(p);
        PROC_SUNLOCK(p);
        break;

    case PT_WRITE_I:
    case PT_WRITE_D:
        td2->td_dbgflags |= TDB_USERWR;
        PROC_UNLOCK(p);
        error = 0;
        if (proc_writemem(td, p, (off_t)(uintptr_t)addr, &data,
            sizeof(int)) != sizeof(int))
            error = ENOMEM;
        else
            CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x",
                p->p_pid, addr, data);
        PROC_LOCK(p);
        break;

    case PT_READ_I:
    case PT_READ_D:
        PROC_UNLOCK(p);
        error = tmp = 0;
        if (proc_readmem(td, p, (off_t)(uintptr_t)addr, &tmp,
            sizeof(int)) != sizeof(int))
            error = ENOMEM;
        else
            CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x",
                p->p_pid, addr, tmp);
        td->td_retval[0] = tmp;
        PROC_LOCK(p);
        break;

    case PT_IO:
        piod = addr;
        iov.iov_base = piod->piod_addr;
        iov.iov_len = piod->piod_len;
        uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
        uio.uio_resid = piod->piod_len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_segflg = UIO_USERSPACE;
        uio.uio_td = td;
        switch (piod->piod_op) {
        case PIOD_READ_D:
        case PIOD_READ_I:
            CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)",
                p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
            uio.uio_rw = UIO_READ;
            break;
        case PIOD_WRITE_D:
        case PIOD_WRITE_I:
            CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)",
                p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
            td2->td_dbgflags |= TDB_USERWR;
            uio.uio_rw = UIO_WRITE;
            break;
        default:
            error = EINVAL;
            goto out;
        }
        PROC_UNLOCK(p);
        error = proc_rwmem(p, &uio);
        /* Report bytes actually transferred back to the debugger. */
        piod->piod_len -= uio.uio_resid;
        PROC_LOCK(p);
        break;

    case PT_KILL:
        CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid);
        data = SIGKILL;
        goto sendsig;   /* in PT_CONTINUE above */

    case PT_SETREGS:
        CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        td2->td_dbgflags |= TDB_USERWR;
        error = PROC_WRITE(regs, td2, addr);
        break;

    case PT_GETREGS:
        CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        error = PROC_READ(regs, td2, addr);
        break;

    case PT_SETFPREGS:
        CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        td2->td_dbgflags |= TDB_USERWR;
        error = PROC_WRITE(fpregs, td2, addr);
        break;

    case PT_GETFPREGS:
        CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        error = PROC_READ(fpregs, td2, addr);
        break;

    case PT_SETDBREGS:
        CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        td2->td_dbgflags |= TDB_USERWR;
        error = PROC_WRITE(dbregs, td2, addr);
        break;

    case PT_GETDBREGS:
        CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid,
            p->p_pid);
        error = PROC_READ(dbregs, td2, addr);
        break;

    case PT_LWPINFO:
        if (data <= 0 || data > sizeof(*pl)) {
            error = EINVAL;
            break;
        }
        pl = addr;
        bzero(pl, sizeof(*pl));
        pl->pl_lwpid = td2->td_tid;
        pl->pl_event = PL_EVENT_NONE;
        pl->pl_flags = 0;
        if (td2->td_dbgflags & TDB_XSIG) {
            pl->pl_event = PL_EVENT_SIGNAL;
            /* Only supply siginfo if the caller's buffer can hold it. */
            if (td2->td_si.si_signo != 0 &&
                data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
                + sizeof(pl->pl_siginfo)){
                pl->pl_flags |= PL_FLAG_SI;
                pl->pl_siginfo = td2->td_si;
            }
        }
        if (td2->td_dbgflags & TDB_SCE)
            pl->pl_flags |= PL_FLAG_SCE;
        else if (td2->td_dbgflags & TDB_SCX)
            pl->pl_flags |= PL_FLAG_SCX;
        if (td2->td_dbgflags & TDB_EXEC)
            pl->pl_flags |= PL_FLAG_EXEC;
        if (td2->td_dbgflags & TDB_FORK) {
            pl->pl_flags |= PL_FLAG_FORKED;
            pl->pl_child_pid = td2->td_dbg_forked;
            if (td2->td_dbgflags & TDB_VFORK)
                pl->pl_flags |= PL_FLAG_VFORKED;
        } else if ((td2->td_dbgflags & (TDB_SCX | TDB_VFORK)) ==
            TDB_VFORK)
            pl->pl_flags |= PL_FLAG_VFORK_DONE;
        if (td2->td_dbgflags & TDB_CHILD)
            pl->pl_flags |= PL_FLAG_CHILD;
        if (td2->td_dbgflags & TDB_BORN)
            pl->pl_flags |= PL_FLAG_BORN;
        if (td2->td_dbgflags & TDB_EXIT)
            pl->pl_flags |= PL_FLAG_EXITED;
        pl->pl_sigmask = td2->td_sigmask;
        pl->pl_siglist = td2->td_siglist;
        strcpy(pl->pl_tdname, td2->td_name);
        if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) {
            pl->pl_syscall_code = td2->td_sa.code;
            pl->pl_syscall_narg = td2->td_sa.callp->sy_narg;
        } else {
            pl->pl_syscall_code = 0;
            pl->pl_syscall_narg = 0;
        }
        CTR6(KTR_PTRACE,
            "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d",
            td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags,
            pl->pl_child_pid, pl->pl_syscall_code);
        break;

    case PT_GETNUMLWPS:
        CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid,
            p->p_numthreads);
        td->td_retval[0] = p->p_numthreads;
        break;

    case PT_GETLWPLIST:
        CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d",
            p->p_pid, data, p->p_numthreads);
        if (data <= 0) {
            error = EINVAL;
            break;
        }
        num = imin(p->p_numthreads, data);
        PROC_UNLOCK(p);
        buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
        tmp = 0;
        PROC_LOCK(p);
        FOREACH_THREAD_IN_PROC(p, td2) {
            if (tmp >= num)
                break;
            buf[tmp++] = td2->td_tid;
        }
        PROC_UNLOCK(p);
        error = copyout(buf, addr, tmp * sizeof(lwpid_t));
        free(buf, M_TEMP);
        if (!error)
            td->td_retval[0] = tmp;
        PROC_LOCK(p);
        break;

    case PT_VM_TIMESTAMP:
        CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d",
            p->p_pid, p->p_vmspace->vm_map.timestamp);
        td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
        break;

    case PT_VM_ENTRY:
        PROC_UNLOCK(p);
        error = ptrace_vm_entry(td, p, addr);
        PROC_LOCK(p);
        break;

    default:
#ifdef __HAVE_PTRACE_MACHDEP
        if (req >= PT_FIRSTMACH) {
            PROC_UNLOCK(p);
            error = cpu_ptrace(td2, req, addr, data);
            PROC_LOCK(p);
        } else
#endif
        /* Unknown request. */
            error = EINVAL;
        break;
    }

out:
    /* Drop our hold on this process now that the request has completed. */
    _PRELE(p);
fail:
    PROC_UNLOCK(p);
    if (proctree_locked)
        sx_xunlock(&proctree_lock);
    return (error);
}
#undef PROC_READ
#undef PROC_WRITE