/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/pioctl.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#include <compat/freebsd32/freebsd32_signal.h>

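/*
 * 32-bit views of the ptrace structures: pointer- and long-sized fields
 * are shrunk to 32 bits so the layouts match what a COMPAT_FREEBSD32
 * (32-bit) consumer passes in from userland.
 */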
struct ptrace_io_desc32 {
	int		piod_op;
	uint32_t	piod_offs;
	uint32_t	piod_addr;
	uint32_t	piod_len;
};

struct ptrace_vm_entry32 {
	int		pve_entry;
	int		pve_timestamp;
	uint32_t	pve_start;
	uint32_t	pve_end;
	uint32_t	pve_offset;
	u_int		pve_prot;
	u_int		pve_pathlen;
	int32_t		pve_fileid;
	u_int		pve_fsid;
	uint32_t	pve_path;
};

struct ptrace_lwpinfo32 {
	lwpid_t	pl_lwpid;	/* LWP described. */
	int	pl_event;	/* Event that stopped the LWP. */
	int	pl_flags;	/* LWP flags. */
	sigset_t	pl_sigmask;	/* LWP signal mask */
	sigset_t	pl_siglist;	/* LWP pending signal */
	struct siginfo32 pl_siginfo;	/* siginfo for signal */
	char	pl_tdname[MAXCOMLEN + 1];	/* LWP name. */
	pid_t	pl_child_pid;	/* New child pid */
	u_int		pl_syscall_code;
	u_int		pl_syscall_narg;
};

#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

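/*
 * Illustrative sketch (userland side): a debugger typically reaches the
 * helpers above through ptrace(2), e.g.
 *
 *	struct reg r;
 *	ptrace(PT_GETREGS, pid, (caddr_t)&r, 0);
 *
 * which ends up in proc_read_regs() via kern_ptrace() below.
 */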
#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, page_offset, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	PROC_ASSERT_HELD(p);

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	/*
	 * If we are writing, then we request vm_fault() to create a private
	 * copy of each page.  Since these copies will not be writeable by the
	 * process, we must explicitly request that they be dirtied.
	 */
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		u_int len;
		vm_page_t m;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault and hold the page on behalf of the process.
		 */
		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
		if (error != KERN_SUCCESS) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (writing && error == 0) {
			vm_map_lock_read(map);
			if (vm_map_check_protection(map, pageno, pageno +
			    PAGE_SIZE, VM_PROT_EXECUTE))
				vm_sync_icache(map, uva, len);
			vm_map_unlock_read(map);
		}

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

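/*
 * Note: kern_ptrace() below drives proc_rwmem() in two ways: PT_READ_*
 * and PT_WRITE_* build a single-int UIO_SYSSPACE uio, while PT_IO points
 * the uio at the caller-supplied buffer with UIO_USERSPACE.
 */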
static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		entry = map->header.next;
		index = 0;
		while (index < pve->pve_entry && entry != &map->header) {
			entry = entry->next;
			index++;
		}
		if (index != pve->pve_entry) {
			error = EINVAL;
			break;
		}
		while (entry != &map->header &&
		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
			entry = entry->next;
			index++;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_RLOCK(obj);
	} while (0);

	vm_map_unlock_read(map);
	vmspace_free(vm);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_RLOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = vm_object_vnode(lobj);
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_RUNLOCK(lobj);
		VM_OBJECT_RUNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			vn_fullpath(td, vp, &fullpath, &freepath);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}
	if (error == 0)
		CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p",
		    p->p_pid, pve->pve_entry, pve->pve_start);

	return (error);
}

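/*
 * Illustrative sketch (userland side, names are placeholders): the map can
 * be walked by calling PT_VM_ENTRY repeatedly, letting the kernel advance
 * pve_entry each time:
 *
 *	struct ptrace_vm_entry pve;
 *	char path[PATH_MAX];
 *
 *	memset(&pve, 0, sizeof(pve));
 *	pve.pve_path = path;
 *	pve.pve_pathlen = sizeof(path);
 *	while (ptrace(PT_VM_ENTRY, pid, (caddr_t)&pve, 0) == 0) {
 *		... use pve.pve_start, pve.pve_end, path ...
 *		pve.pve_pathlen = sizeof(path);
 *	}
 */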
#ifdef COMPAT_FREEBSD32
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}

static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

	pl32->pl_lwpid = pl->pl_lwpid;
	pl32->pl_event = pl->pl_event;
	pl32->pl_flags = pl->pl_flags;
	pl32->pl_sigmask = pl->pl_sigmask;
	pl32->pl_siglist = pl->pl_siglist;
	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
	strcpy(pl32->pl_tdname, pl->pl_tdname);
	pl32->pl_child_pid = pl->pl_child_pid;
	pl32->pl_syscall_code = pl->pl_syscall_code;
	pl32->pl_syscall_narg = pl->pl_syscall_narg;
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_lwpinfo32 pl32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

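/*
 * Illustrative sketch (userland side): the usual debugger flow built on the
 * requests handled by kern_ptrace() below.  PT_ATTACH posts SIGSTOP, the
 * debugger waits for the stop, and (caddr_t)1 with PT_CONTINUE means
 * "resume at the current PC":
 *
 *	ptrace(PT_ATTACH, pid, NULL, 0);
 *	waitpid(pid, &status, 0);
 *	...
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);
 *	...
 *	ptrace(PT_DETACH, pid, NULL, 0);
 */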
#ifdef COMPAT_FREEBSD32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL, *td3;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
	struct ptrace_lwpinfo32 *pl32 = NULL;
	struct ptrace_lwpinfo plr;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_FOLLOW_FORK:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			td2 = tdfind(pid, -1);
			if (td2 == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			p = td2->td_proc;
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/*
		 * Always legal, when there is a parent process which
		 * could trace us.  Otherwise, reject.
		 */
		if ((p->p_flag & P_TRACED) != 0) {
			error = EBUSY;
			goto fail;
		}
		if (p->p_pptr == initproc) {
			error = EPERM;
			goto fail;
		}
		break;

	case PT_ATTACH:
		/* Self */
		if (p == td->td_proc) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		if (p->p_flag & P_PPWAIT)
			p->p_flag |= P_PPTRACE;
		p->p_oppid = p->p_pptr->p_pid;
		CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid);
		break;

	case PT_ATTACH:
		/* security check done above */
		/*
		 * It would be nice if the tracing relationship was separate
		 * from the parent relationship but that would require
		 * another set of links in the proc struct or for "wait"
		 * to scan the entire proc table.  To make life easier,
		 * we just re-parent the process we're trying to trace.
		 * The old parent is remembered so we can put things back
		 * on a "detach".
		 */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc) {
			proc_reparent(p, td->td_proc);
		}
		data = SIGSTOP;
		CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid,
		    p->p_oppid);
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_FOLLOW_FORK:
		CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid,
		    p->p_flag & P_FOLLOWFORK ? "enabled" : "disabled",
		    data ? "enabled" : "disabled");
		if (data)
			p->p_flag |= P_FOLLOWFORK;
		else
			p->p_flag &= ~P_FOLLOWFORK;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			CTR2(KTR_PTRACE, "PT_STEP: tid %d (pid %d)",
			    td2->td_tid, p->p_pid);
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_stops |= S_PT_SCE;
				CTR4(KTR_PTRACE,
				    "PT_TO_SCE: pid %d, stops = %#x, PC = %#lx, sig = %d",
				    p->p_pid, p->p_stops,
				    (u_long)(uintfptr_t)addr, data);
				break;
			case PT_TO_SCX:
				p->p_stops |= S_PT_SCX;
				CTR4(KTR_PTRACE,
				    "PT_TO_SCX: pid %d, stops = %#x, PC = %#lx, sig = %d",
				    p->p_pid, p->p_stops,
				    (u_long)(uintfptr_t)addr, data);
				break;
			case PT_SYSCALL:
				p->p_stops |= S_PT_SCE | S_PT_SCX;
				CTR4(KTR_PTRACE,
				    "PT_SYSCALL: pid %d, stops = %#x, PC = %#lx, sig = %d",
				    p->p_pid, p->p_stops,
				    (u_long)(uintfptr_t)addr, data);
				break;
			case PT_CONTINUE:
				CTR3(KTR_PTRACE,
				    "PT_CONTINUE: pid %d, PC = %#lx, sig = %d",
				    p->p_pid, (u_long)(uintfptr_t)addr, data);
				break;
			}
			break;
		case PT_DETACH:
			/*
			 * Reset the process parent.
			 *
			 * NB: This clears P_TRACED before reparenting
			 * a detached process back to its original
			 * parent.  Otherwise the debuggee will be set
			 * as an orphan of the debugger.
			 */
			p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK);
			if (p->p_oppid != p->p_pptr->p_pid) {
				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				pp = proc_realparent(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
				CTR3(KTR_PTRACE,
				    "PT_DETACH: pid %d reparented to pid %d, sig %d",
				    p->p_pid, pp->p_pid, data);
			} else
				CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d",
				    p->p_pid, data);
			p->p_oppid = 0;
			p->p_stops = 0;

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xsig = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_dbgflags &= ~TDB_SUSPEND;
			}
			/*
			 * Unsuspend all threads.  To keep a thread from
			 * running, the debugger should suspend it with
			 * PT_SUSPEND before continuing the process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
			if (req == PT_ATTACH)
				kern_psignal(p, data);
		} else {
			if (data)
				kern_psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		if (error == 0) {
			if (write)
				CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x",
				    p->p_pid, addr, data);
			else
				CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x",
				    p->p_pid, addr, tmp);
		}
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)",
			    p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)",
			    p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

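	/*
	 * Illustrative sketch (userland side) of the two transfer styles
	 * handled above.  PT_READ_D/PT_WRITE_D move one int per call and
	 * return the word read as the value of ptrace() itself, so callers
	 * clear errno to tell a -1 word from an error; PT_IO moves a whole
	 * buffer described by a ptrace_io_desc and updates piod_len to the
	 * number of bytes actually transferred:
	 *
	 *	errno = 0;
	 *	int word = ptrace(PT_READ_D, pid, (caddr_t)addr_in_child, 0);
	 *
	 *	struct ptrace_io_desc piod;
	 *	piod.piod_op = PIOD_READ_D;
	 *	piod.piod_offs = (void *)addr_in_child;
	 *	piod.piod_addr = buf;
	 *	piod.piod_len = sizeof(buf);
	 *	ptrace(PT_IO, pid, (caddr_t)&piod, 0);
	 *
	 * (addr_in_child and buf are placeholders.)
	 */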
	case PT_KILL:
		CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid);
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
		    (!wrap32 && data > sizeof(*pl)) ||
		    (wrap32 && data > sizeof(*pl32))) {
#else
		    data > sizeof(*pl)) {
#endif
			error = EINVAL;
			break;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			pl = &plr;
			pl32 = addr;
		} else
#endif
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		pl->pl_event = PL_EVENT_NONE;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			if (td2->td_dbgksi.ksi_signo != 0 &&
#ifdef COMPAT_FREEBSD32
			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
			    pl_siginfo) + sizeof(struct siginfo32)))
#else
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)
#endif
			){
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_dbgksi.ksi_info;
			}
		}
		if ((pl->pl_flags & PL_FLAG_SI) == 0)
			bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		if (td2->td_dbgflags & TDB_FORK) {
			pl->pl_flags |= PL_FLAG_FORKED;
			pl->pl_child_pid = td2->td_dbg_forked;
		}
		if (td2->td_dbgflags & TDB_CHILD)
			pl->pl_flags |= PL_FLAG_CHILD;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		strcpy(pl->pl_tdname, td2->td_name);
		if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) {
			pl->pl_syscall_code = td2->td_dbg_sc_code;
			pl->pl_syscall_narg = td2->td_dbg_sc_narg;
		} else {
			pl->pl_syscall_code = 0;
			pl->pl_syscall_narg = 0;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			ptrace_lwpinfo_to32(pl, pl32);
#endif
		CTR6(KTR_PTRACE,
		    "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d",
		    td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags,
		    pl->pl_child_pid, pl->pl_syscall_code);
		break;

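	/*
	 * Illustrative sketch (userland side): after a stop is reported, the
	 * debugger can ask why with PT_LWPINFO, as filled in above:
	 *
	 *	struct ptrace_lwpinfo pl;
	 *
	 *	ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl));
	 *	if (pl.pl_flags & PL_FLAG_SCE)
	 *		... stopped at syscall entry, see pl.pl_syscall_code ...
	 */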
	case PT_GETNUMLWPS:
		CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid,
		    p->p_numthreads);
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d",
		    p->p_pid, data, p->p_numthreads);
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d",
		    p->p_pid, p->p_vmspace->vm_map.timestamp);
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
		error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	CTR3(KTR_PTRACE, "stopevent: pid %d event %u val %u", p->p_pid, event,
	    val);
	do {
		if (event != S_EXIT)
			p->p_xsig = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}