/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/pioctl.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#include <compat/freebsd32/freebsd32_signal.h>

struct ptrace_io_desc32 {
	int		piod_op;
	uint32_t	piod_offs;
	uint32_t	piod_addr;
	uint32_t	piod_len;
};

struct ptrace_vm_entry32 {
	int		pve_entry;
	int		pve_timestamp;
	uint32_t	pve_start;
	uint32_t	pve_end;
	uint32_t	pve_offset;
	u_int		pve_prot;
	u_int		pve_pathlen;
	int32_t		pve_fileid;
	u_int		pve_fsid;
	uint32_t	pve_path;
};

struct ptrace_lwpinfo32 {
	lwpid_t	pl_lwpid;		/* LWP described. */
	int	pl_event;		/* Event that stopped the LWP. */
	int	pl_flags;		/* LWP flags. */
	sigset_t	pl_sigmask;	/* LWP signal mask */
	sigset_t	pl_siglist;	/* LWP pending signal */
	struct siginfo32 pl_siginfo;	/* siginfo for signal */
	char	pl_tdname[MAXCOMLEN + 1];	/* LWP name. */
	int	pl_child_pid;		/* New child pid */
	u_int	pl_syscall_code;
	u_int	pl_syscall_narg;
};

#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while (0)
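/*
 * For reference, a sketch of what PROC_ACTION() expands to inside
 * proc_read_regs() below (illustrative only; the macro above is the
 * authoritative form):
 *
 *	int
 *	proc_read_regs(struct thread *td, struct reg *regs)
 *	{
 *		int error;
 *
 *		PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 *		if ((td->td_proc->p_flag & P_INMEM) == 0)
 *			error = EIO;
 *		else
 *			error = fill_regs(td, regs);
 *		return (error);
 *	}
 */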

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, page_offset, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	/*
	 * If we are writing, then we request vm_fault() to create a private
	 * copy of each page.  Since these copies will not be writeable by the
	 * process, we must explicitly request that they be dirtied.
	 */
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		u_int len;
		vm_page_t m;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault and hold the page on behalf of the process.
		 */
		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
		if (error != KERN_SUCCESS) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (writing && error == 0) {
			vm_map_lock_read(map);
			if (vm_map_check_protection(map, pageno, pageno +
			    PAGE_SIZE, VM_PROT_EXECUTE))
				vm_sync_icache(map, uva, len);
			vm_map_unlock_read(map);
		}

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
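/*
 * A minimal sketch of how a caller drives proc_rwmem(): build a single-
 * segment uio describing the traced process's address and a kernel buffer,
 * then let proc_rwmem() fault the pages in.  This mirrors the PT_READ_D
 * path in kern_ptrace() below (illustrative only):
 *
 *	int tmp = 0;
 *	struct iovec iov;
 *	struct uio uio;
 *
 *	iov.iov_base = (caddr_t)&tmp;
 *	iov.iov_len = sizeof(int);
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_offset = (off_t)(uintptr_t)addr;	// target VA
 *	uio.uio_resid = sizeof(int);
 *	uio.uio_segflg = UIO_SYSSPACE;
 *	uio.uio_rw = UIO_READ;
 *	uio.uio_td = td;
 *	error = proc_rwmem(p, &uio);
 */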

static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		entry = map->header.next;
		index = 0;
		while (index < pve->pve_entry && entry != &map->header) {
			entry = entry->next;
			index++;
		}
		if (index != pve->pve_entry) {
			error = EINVAL;
			break;
		}
		while (entry != &map->header &&
		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
			entry = entry->next;
			index++;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_RLOCK(obj);
	} while (0);

	vm_map_unlock_read(map);
	vmspace_free(vm);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_RLOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = vm_object_vnode(lobj);
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_RUNLOCK(lobj);
		VM_OBJECT_RUNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			vn_fullpath(td, vp, &fullpath, &freepath);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}
	if (error == 0)
		CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p",
		    p->p_pid, pve->pve_entry, pve->pve_start);

	return (error);
}
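/*
 * From userland, a debugger typically walks the target's map by calling
 * PT_VM_ENTRY repeatedly: start with pve_entry = 0 and feed the value the
 * kernel hands back into the next call until the request fails with ENOENT.
 * A minimal sketch (illustrative; error handling and buffer names are
 * assumptions):
 *
 *	struct ptrace_vm_entry pve;
 *	char path[PATH_MAX];
 *
 *	memset(&pve, 0, sizeof(pve));
 *	pve.pve_entry = 0;
 *	for (;;) {
 *		pve.pve_path = path;
 *		pve.pve_pathlen = sizeof(path);
 *		if (ptrace(PT_VM_ENTRY, pid, (caddr_t)&pve, 0) == -1)
 *			break;		// ENOENT once all entries are seen
 *		printf("%#lx-%#lx %s\n", (long)pve.pve_start,
 *		    (long)pve.pve_end, pve.pve_pathlen ? path : "");
 *	}
 */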

#ifdef COMPAT_FREEBSD32
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}

static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

	pl32->pl_lwpid = pl->pl_lwpid;
	pl32->pl_event = pl->pl_event;
	pl32->pl_flags = pl->pl_flags;
	pl32->pl_sigmask = pl->pl_sigmask;
	pl32->pl_siglist = pl->pl_siglist;
	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
	strcpy(pl32->pl_tdname, pl->pl_tdname);
	pl32->pl_child_pid = pl->pl_child_pid;
	pl32->pl_syscall_code = pl->pl_syscall_code;
	pl32->pl_syscall_narg = pl->pl_syscall_narg;
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_lwpinfo32 pl32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT
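/*
 * The usual userland sequence against this handler is attach, wait for the
 * SIGSTOP that PT_ATTACH queues, poke at the target, then continue or
 * detach.  A minimal sketch (illustrative only; no error handling):
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *
 *	ptrace(PT_ATTACH, pid, NULL, 0);
 *	waitpid(pid, &status, 0);		// target stops with SIGSTOP
 *	...					// inspect: PT_GETREGS, PT_IO, ...
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);	// resume at current PC,
 *						// or PT_DETACH to let it go
 */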

#ifdef COMPAT_FREEBSD32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL, *td3;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
	struct ptrace_lwpinfo32 *pl32 = NULL;
	struct ptrace_lwpinfo plr;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_FOLLOW_FORK:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			td2 = tdfind(pid, -1);
			if (td2 == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			p = td2->td_proc;
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup a la procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		if (p->p_flag & P_PPWAIT)
			p->p_flag |= P_PPTRACE;
		p->p_oppid = p->p_pptr->p_pid;
		CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid);
		break;

	case PT_ATTACH:
		/* security check done above */
		/*
		 * It would be nice if the tracing relationship was separate
		 * from the parent relationship but that would require
		 * another set of links in the proc struct or for "wait"
		 * to scan the entire proc table.  To make life easier,
		 * we just re-parent the process we're trying to trace.
		 * The old parent is remembered so we can put things back
		 * on a "detach".
		 */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc) {
			proc_reparent(p, td->td_proc);
		}
		data = SIGSTOP;
		CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid,
		    p->p_oppid);
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_FOLLOW_FORK:
		CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid,
		    p->p_flag & P_FOLLOWFORK ? "enabled" : "disabled",
		    data ? "enabled" : "disabled");
		if (data)
			p->p_flag |= P_FOLLOWFORK;
		else
			p->p_flag &= ~P_FOLLOWFORK;
		break;
"enabled" : "disabled"); 892 if (data) 893 p->p_flag |= P_FOLLOWFORK; 894 else 895 p->p_flag &= ~P_FOLLOWFORK; 896 break; 897 898 case PT_STEP: 899 case PT_CONTINUE: 900 case PT_TO_SCE: 901 case PT_TO_SCX: 902 case PT_SYSCALL: 903 case PT_DETACH: 904 /* Zero means do not send any signal */ 905 if (data < 0 || data > _SIG_MAXSIG) { 906 error = EINVAL; 907 break; 908 } 909 910 switch (req) { 911 case PT_STEP: 912 CTR2(KTR_PTRACE, "PT_STEP: tid %d (pid %d)", 913 td2->td_tid, p->p_pid); 914 error = ptrace_single_step(td2); 915 if (error) 916 goto out; 917 break; 918 case PT_CONTINUE: 919 case PT_TO_SCE: 920 case PT_TO_SCX: 921 case PT_SYSCALL: 922 if (addr != (void *)1) { 923 error = ptrace_set_pc(td2, 924 (u_long)(uintfptr_t)addr); 925 if (error) 926 goto out; 927 } 928 switch (req) { 929 case PT_TO_SCE: 930 p->p_stops |= S_PT_SCE; 931 CTR4(KTR_PTRACE, 932 "PT_TO_SCE: pid %d, stops = %#x, PC = %#lx, sig = %d", 933 p->p_pid, p->p_stops, 934 (u_long)(uintfptr_t)addr, data); 935 break; 936 case PT_TO_SCX: 937 p->p_stops |= S_PT_SCX; 938 CTR4(KTR_PTRACE, 939 "PT_TO_SCX: pid %d, stops = %#x, PC = %#lx, sig = %d", 940 p->p_pid, p->p_stops, 941 (u_long)(uintfptr_t)addr, data); 942 break; 943 case PT_SYSCALL: 944 p->p_stops |= S_PT_SCE | S_PT_SCX; 945 CTR4(KTR_PTRACE, 946 "PT_SYSCALL: pid %d, stops = %#x, PC = %#lx, sig = %d", 947 p->p_pid, p->p_stops, 948 (u_long)(uintfptr_t)addr, data); 949 break; 950 case PT_CONTINUE: 951 CTR3(KTR_PTRACE, 952 "PT_CONTINUE: pid %d, PC = %#lx, sig = %d", 953 p->p_pid, (u_long)(uintfptr_t)addr, data); 954 break; 955 } 956 break; 957 case PT_DETACH: 958 /* 959 * Reset the process parent. 960 * 961 * NB: This clears P_TRACED before reparenting 962 * a detached process back to its original 963 * parent. Otherwise the debugee will be set 964 * as an orphan of the debugger. 965 */ 966 p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK); 967 if (p->p_oppid != p->p_pptr->p_pid) { 968 PROC_LOCK(p->p_pptr); 969 sigqueue_take(p->p_ksi); 970 PROC_UNLOCK(p->p_pptr); 971 972 pp = proc_realparent(p); 973 proc_reparent(p, pp); 974 if (pp == initproc) 975 p->p_sigparent = SIGCHLD; 976 CTR3(KTR_PTRACE, 977 "PT_DETACH: pid %d reparented to pid %d, sig %d", 978 p->p_pid, pp->p_pid, data); 979 } else 980 CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d", 981 p->p_pid, data); 982 p->p_oppid = 0; 983 p->p_stops = 0; 984 985 /* should we send SIGCHLD? */ 986 /* childproc_continued(p); */ 987 break; 988 } 989 990 sendsig: 991 if (proctree_locked) { 992 sx_xunlock(&proctree_lock); 993 proctree_locked = 0; 994 } 995 p->p_xsig = data; 996 p->p_xthread = NULL; 997 if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) { 998 /* deliver or queue signal */ 999 td2->td_dbgflags &= ~TDB_XSIG; 1000 td2->td_xsig = data; 1001 1002 if (req == PT_DETACH) { 1003 FOREACH_THREAD_IN_PROC(p, td3) 1004 td3->td_dbgflags &= ~TDB_SUSPEND; 1005 } 1006 /* 1007 * unsuspend all threads, to not let a thread run, 1008 * you should use PT_SUSPEND to suspend it before 1009 * continuing process. 1010 */ 1011 PROC_SLOCK(p); 1012 p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED); 1013 thread_unsuspend(p); 1014 PROC_SUNLOCK(p); 1015 if (req == PT_ATTACH) 1016 kern_psignal(p, data); 1017 } else { 1018 if (data) 1019 kern_psignal(p, data); 1020 } 1021 break; 1022 1023 case PT_WRITE_I: 1024 case PT_WRITE_D: 1025 td2->td_dbgflags |= TDB_USERWR; 1026 write = 1; 1027 /* FALLTHROUGH */ 1028 case PT_READ_I: 1029 case PT_READ_D: 1030 PROC_UNLOCK(p); 1031 tmp = 0; 1032 /* write = 0 set above */ 1033 iov.iov_base = write ? 

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)",
			    p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)",
			    p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;
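	/*
	 * PT_IO transfers arbitrary-length chunks in one call.  A userland
	 * caller fills in a struct ptrace_io_desc and reads piod_len back
	 * to learn how much was actually moved, as handled above.  A
	 * minimal sketch (illustrative; buffer names are assumptions):
	 *
	 *	struct ptrace_io_desc piod;
	 *	char buf[128];
	 *
	 *	piod.piod_op = PIOD_READ_D;
	 *	piod.piod_offs = (void *)target_addr;	// address in the tracee
	 *	piod.piod_addr = buf;			// buffer in the tracer
	 *	piod.piod_len = sizeof(buf);
	 *	if (ptrace(PT_IO, pid, (caddr_t)&piod, 0) == -1)
	 *		err(1, "PT_IO");
	 *	// piod.piod_len now holds the number of bytes transferred
	 */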

	case PT_KILL:
		CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid);
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
		    (!wrap32 && data > sizeof(*pl)) ||
		    (wrap32 && data > sizeof(*pl32))) {
#else
		    data > sizeof(*pl)) {
#endif
			error = EINVAL;
			break;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			pl = &plr;
			pl32 = addr;
		} else
#endif
			pl = addr;
		pl->pl_lwpid = td2->td_tid;
		pl->pl_event = PL_EVENT_NONE;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			if (td2->td_dbgksi.ksi_signo != 0 &&
#ifdef COMPAT_FREEBSD32
			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
			    pl_siginfo) + sizeof(struct siginfo32)))
#else
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)
#endif
			) {
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_dbgksi.ksi_info;
			}
		}
		if ((pl->pl_flags & PL_FLAG_SI) == 0)
			bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		if (td2->td_dbgflags & TDB_FORK) {
			pl->pl_flags |= PL_FLAG_FORKED;
			pl->pl_child_pid = td2->td_dbg_forked;
		}
		if (td2->td_dbgflags & TDB_CHILD)
			pl->pl_flags |= PL_FLAG_CHILD;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		strcpy(pl->pl_tdname, td2->td_name);
		if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) {
			pl->pl_syscall_code = td2->td_dbg_sc_code;
			pl->pl_syscall_narg = td2->td_dbg_sc_narg;
		} else {
			pl->pl_syscall_code = 0;
			pl->pl_syscall_narg = 0;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			ptrace_lwpinfo_to32(pl, pl32);
#endif
		CTR6(KTR_PTRACE,
		    "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d",
		    td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags,
		    pl->pl_child_pid, pl->pl_syscall_code);
		break;
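	/*
	 * A userland consumer of PT_LWPINFO passes the structure size in
	 * the data argument, which is how the handler above decides how
	 * much to fill in and copy out (including whether pl_siginfo is
	 * wanted).  A minimal sketch (illustrative only):
	 *
	 *	struct ptrace_lwpinfo pl;
	 *
	 *	if (ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl)) == -1)
	 *		err(1, "PT_LWPINFO");
	 *	if (pl.pl_flags & PL_FLAG_SI)
	 *		printf("stopped by signal %d\n", pl.pl_siginfo.si_signo);
	 */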

	case PT_GETNUMLWPS:
		CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid,
		    p->p_numthreads);
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d",
		    p->p_pid, data, p->p_numthreads);
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d",
		    p->p_pid, p->p_vmspace->vm_map.timestamp);
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
			error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	CTR3(KTR_PTRACE, "stopevent: pid %d event %u val %u", p->p_pid, event,
	    val);
	do {
		if (event != S_EXIT)
			p->p_xsig = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}