1 /*- 2 * Copyright (c) 1994, Sean Eric Fagan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Sean Eric Fagan. 16 * 4. The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_compat.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/lock.h> 40 #include <sys/mutex.h> 41 #include <sys/syscallsubr.h> 42 #include <sys/sysent.h> 43 #include <sys/sysproto.h> 44 #include <sys/priv.h> 45 #include <sys/proc.h> 46 #include <sys/vnode.h> 47 #include <sys/ptrace.h> 48 #include <sys/rwlock.h> 49 #include <sys/sx.h> 50 #include <sys/malloc.h> 51 #include <sys/signalvar.h> 52 53 #include <machine/reg.h> 54 55 #include <security/audit/audit.h> 56 57 #include <vm/vm.h> 58 #include <vm/pmap.h> 59 #include <vm/vm_extern.h> 60 #include <vm/vm_map.h> 61 #include <vm/vm_kern.h> 62 #include <vm/vm_object.h> 63 #include <vm/vm_page.h> 64 #include <vm/vm_param.h> 65 66 #ifdef COMPAT_FREEBSD32 67 #include <sys/procfs.h> 68 #include <compat/freebsd32/freebsd32_signal.h> 69 70 struct ptrace_io_desc32 { 71 int piod_op; 72 uint32_t piod_offs; 73 uint32_t piod_addr; 74 uint32_t piod_len; 75 }; 76 77 struct ptrace_vm_entry32 { 78 int pve_entry; 79 int pve_timestamp; 80 uint32_t pve_start; 81 uint32_t pve_end; 82 uint32_t pve_offset; 83 u_int pve_prot; 84 u_int pve_pathlen; 85 int32_t pve_fileid; 86 u_int pve_fsid; 87 uint32_t pve_path; 88 }; 89 90 struct ptrace_lwpinfo32 { 91 lwpid_t pl_lwpid; /* LWP described. */ 92 int pl_event; /* Event that stopped the LWP. */ 93 int pl_flags; /* LWP flags. */ 94 sigset_t pl_sigmask; /* LWP signal mask */ 95 sigset_t pl_siglist; /* LWP pending signal */ 96 struct siginfo32 pl_siginfo; /* siginfo for signal */ 97 char pl_tdname[MAXCOMLEN + 1]; /* LWP name. */ 98 int pl_child_pid; /* New child pid */ 99 }; 100 101 #endif 102 103 /* 104 * Functions implemented using PROC_ACTION(): 105 * 106 * proc_read_regs(proc, regs) 107 * Get the current user-visible register set from the process 108 * and copy it into the regs structure (<machine/reg.h>). 109 * The process is stopped at the time read_regs is called. 110 * 111 * proc_write_regs(proc, regs) 112 * Update the current register set from the passed in regs 113 * structure. Take care to avoid clobbering special CPU 114 * registers or privileged bits in the PSL. 115 * Depending on the architecture this may have fix-up work to do, 116 * especially if the IAR or PCW are modified. 117 * The process is stopped at the time write_regs is called. 118 * 119 * proc_read_fpregs, proc_write_fpregs 120 * deal with the floating point register set, otherwise as above. 121 * 122 * proc_read_dbregs, proc_write_dbregs 123 * deal with the processor debug register set, otherwise as above. 124 * 125 * proc_sstep(proc) 126 * Arrange for the process to trap after executing a single instruction. 127 */ 128 129 #define PROC_ACTION(action) do { \ 130 int error; \ 131 \ 132 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \ 133 if ((td->td_proc->p_flag & P_INMEM) == 0) \ 134 error = EIO; \ 135 else \ 136 error = (action); \ 137 return (error); \ 138 } while(0) 139 140 int 141 proc_read_regs(struct thread *td, struct reg *regs) 142 { 143 144 PROC_ACTION(fill_regs(td, regs)); 145 } 146 147 int 148 proc_write_regs(struct thread *td, struct reg *regs) 149 { 150 151 PROC_ACTION(set_regs(td, regs)); 152 } 153 154 int 155 proc_read_dbregs(struct thread *td, struct dbreg *dbregs) 156 { 157 158 PROC_ACTION(fill_dbregs(td, dbregs)); 159 } 160 161 int 162 proc_write_dbregs(struct thread *td, struct dbreg *dbregs) 163 { 164 165 PROC_ACTION(set_dbregs(td, dbregs)); 166 } 167 168 /* 169 * Ptrace doesn't support fpregs at all, and there are no security holes 170 * or translations for fpregs, so we can just copy them. 171 */ 172 int 173 proc_read_fpregs(struct thread *td, struct fpreg *fpregs) 174 { 175 176 PROC_ACTION(fill_fpregs(td, fpregs)); 177 } 178 179 int 180 proc_write_fpregs(struct thread *td, struct fpreg *fpregs) 181 { 182 183 PROC_ACTION(set_fpregs(td, fpregs)); 184 } 185 186 #ifdef COMPAT_FREEBSD32 187 /* For 32 bit binaries, we need to expose the 32 bit regs layouts. */ 188 int 189 proc_read_regs32(struct thread *td, struct reg32 *regs32) 190 { 191 192 PROC_ACTION(fill_regs32(td, regs32)); 193 } 194 195 int 196 proc_write_regs32(struct thread *td, struct reg32 *regs32) 197 { 198 199 PROC_ACTION(set_regs32(td, regs32)); 200 } 201 202 int 203 proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 204 { 205 206 PROC_ACTION(fill_dbregs32(td, dbregs32)); 207 } 208 209 int 210 proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 211 { 212 213 PROC_ACTION(set_dbregs32(td, dbregs32)); 214 } 215 216 int 217 proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 218 { 219 220 PROC_ACTION(fill_fpregs32(td, fpregs32)); 221 } 222 223 int 224 proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 225 { 226 227 PROC_ACTION(set_fpregs32(td, fpregs32)); 228 } 229 #endif 230 231 int 232 proc_sstep(struct thread *td) 233 { 234 235 PROC_ACTION(ptrace_single_step(td)); 236 } 237 238 int 239 proc_rwmem(struct proc *p, struct uio *uio) 240 { 241 vm_map_t map; 242 vm_offset_t pageno; /* page number */ 243 vm_prot_t reqprot; 244 int error, fault_flags, page_offset, writing; 245 246 /* 247 * Assert that someone has locked this vmspace. (Should be 248 * curthread but we can't assert that.) This keeps the process 249 * from exiting out from under us until this operation completes. 250 */ 251 KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__, 252 p, p->p_pid)); 253 254 /* 255 * The map we want... 256 */ 257 map = &p->p_vmspace->vm_map; 258 259 /* 260 * If we are writing, then we request vm_fault() to create a private 261 * copy of each page. Since these copies will not be writeable by the 262 * process, we must explicity request that they be dirtied. 263 */ 264 writing = uio->uio_rw == UIO_WRITE; 265 reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ; 266 fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL; 267 268 /* 269 * Only map in one page at a time. We don't have to, but it 270 * makes things easier. This way is trivial - right? 271 */ 272 do { 273 vm_offset_t uva; 274 u_int len; 275 vm_page_t m; 276 277 uva = (vm_offset_t)uio->uio_offset; 278 279 /* 280 * Get the page number of this segment. 281 */ 282 pageno = trunc_page(uva); 283 page_offset = uva - pageno; 284 285 /* 286 * How many bytes to copy 287 */ 288 len = min(PAGE_SIZE - page_offset, uio->uio_resid); 289 290 /* 291 * Fault and hold the page on behalf of the process. 292 */ 293 error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m); 294 if (error != KERN_SUCCESS) { 295 if (error == KERN_RESOURCE_SHORTAGE) 296 error = ENOMEM; 297 else 298 error = EFAULT; 299 break; 300 } 301 302 /* 303 * Now do the i/o move. 304 */ 305 error = uiomove_fromphys(&m, page_offset, len, uio); 306 307 /* Make the I-cache coherent for breakpoints. */ 308 if (writing && error == 0) { 309 vm_map_lock_read(map); 310 if (vm_map_check_protection(map, pageno, pageno + 311 PAGE_SIZE, VM_PROT_EXECUTE)) 312 vm_sync_icache(map, uva, len); 313 vm_map_unlock_read(map); 314 } 315 316 /* 317 * Release the page. 318 */ 319 vm_page_lock(m); 320 vm_page_unhold(m); 321 vm_page_unlock(m); 322 323 } while (error == 0 && uio->uio_resid > 0); 324 325 return (error); 326 } 327 328 static int 329 ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve) 330 { 331 struct vattr vattr; 332 vm_map_t map; 333 vm_map_entry_t entry; 334 vm_object_t obj, tobj, lobj; 335 struct vmspace *vm; 336 struct vnode *vp; 337 char *freepath, *fullpath; 338 u_int pathlen; 339 int error, index; 340 341 error = 0; 342 obj = NULL; 343 344 vm = vmspace_acquire_ref(p); 345 map = &vm->vm_map; 346 vm_map_lock_read(map); 347 348 do { 349 entry = map->header.next; 350 index = 0; 351 while (index < pve->pve_entry && entry != &map->header) { 352 entry = entry->next; 353 index++; 354 } 355 if (index != pve->pve_entry) { 356 error = EINVAL; 357 break; 358 } 359 while (entry != &map->header && 360 (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { 361 entry = entry->next; 362 index++; 363 } 364 if (entry == &map->header) { 365 error = ENOENT; 366 break; 367 } 368 369 /* We got an entry. */ 370 pve->pve_entry = index + 1; 371 pve->pve_timestamp = map->timestamp; 372 pve->pve_start = entry->start; 373 pve->pve_end = entry->end - 1; 374 pve->pve_offset = entry->offset; 375 pve->pve_prot = entry->protection; 376 377 /* Backing object's path needed? */ 378 if (pve->pve_pathlen == 0) 379 break; 380 381 pathlen = pve->pve_pathlen; 382 pve->pve_pathlen = 0; 383 384 obj = entry->object.vm_object; 385 if (obj != NULL) 386 VM_OBJECT_RLOCK(obj); 387 } while (0); 388 389 vm_map_unlock_read(map); 390 vmspace_free(vm); 391 392 pve->pve_fsid = VNOVAL; 393 pve->pve_fileid = VNOVAL; 394 395 if (error == 0 && obj != NULL) { 396 lobj = obj; 397 for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) { 398 if (tobj != obj) 399 VM_OBJECT_RLOCK(tobj); 400 if (lobj != obj) 401 VM_OBJECT_RUNLOCK(lobj); 402 lobj = tobj; 403 pve->pve_offset += tobj->backing_object_offset; 404 } 405 vp = vm_object_vnode(lobj); 406 if (vp != NULL) 407 vref(vp); 408 if (lobj != obj) 409 VM_OBJECT_RUNLOCK(lobj); 410 VM_OBJECT_RUNLOCK(obj); 411 412 if (vp != NULL) { 413 freepath = NULL; 414 fullpath = NULL; 415 vn_fullpath(td, vp, &fullpath, &freepath); 416 vn_lock(vp, LK_SHARED | LK_RETRY); 417 if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) { 418 pve->pve_fileid = vattr.va_fileid; 419 pve->pve_fsid = vattr.va_fsid; 420 } 421 vput(vp); 422 423 if (fullpath != NULL) { 424 pve->pve_pathlen = strlen(fullpath) + 1; 425 if (pve->pve_pathlen <= pathlen) { 426 error = copyout(fullpath, pve->pve_path, 427 pve->pve_pathlen); 428 } else 429 error = ENAMETOOLONG; 430 } 431 if (freepath != NULL) 432 free(freepath, M_TEMP); 433 } 434 } 435 if (error == 0) 436 CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p", 437 p->p_pid, pve->pve_entry, pve->pve_start); 438 439 return (error); 440 } 441 442 #ifdef COMPAT_FREEBSD32 443 static int 444 ptrace_vm_entry32(struct thread *td, struct proc *p, 445 struct ptrace_vm_entry32 *pve32) 446 { 447 struct ptrace_vm_entry pve; 448 int error; 449 450 pve.pve_entry = pve32->pve_entry; 451 pve.pve_pathlen = pve32->pve_pathlen; 452 pve.pve_path = (void *)(uintptr_t)pve32->pve_path; 453 454 error = ptrace_vm_entry(td, p, &pve); 455 if (error == 0) { 456 pve32->pve_entry = pve.pve_entry; 457 pve32->pve_timestamp = pve.pve_timestamp; 458 pve32->pve_start = pve.pve_start; 459 pve32->pve_end = pve.pve_end; 460 pve32->pve_offset = pve.pve_offset; 461 pve32->pve_prot = pve.pve_prot; 462 pve32->pve_fileid = pve.pve_fileid; 463 pve32->pve_fsid = pve.pve_fsid; 464 } 465 466 pve32->pve_pathlen = pve.pve_pathlen; 467 return (error); 468 } 469 470 static void 471 ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl, 472 struct ptrace_lwpinfo32 *pl32) 473 { 474 475 pl32->pl_lwpid = pl->pl_lwpid; 476 pl32->pl_event = pl->pl_event; 477 pl32->pl_flags = pl->pl_flags; 478 pl32->pl_sigmask = pl->pl_sigmask; 479 pl32->pl_siglist = pl->pl_siglist; 480 siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo); 481 strcpy(pl32->pl_tdname, pl->pl_tdname); 482 pl32->pl_child_pid = pl->pl_child_pid; 483 } 484 #endif /* COMPAT_FREEBSD32 */ 485 486 /* 487 * Process debugging system call. 488 */ 489 #ifndef _SYS_SYSPROTO_H_ 490 struct ptrace_args { 491 int req; 492 pid_t pid; 493 caddr_t addr; 494 int data; 495 }; 496 #endif 497 498 #ifdef COMPAT_FREEBSD32 499 /* 500 * This CPP subterfuge is to try and reduce the number of ifdefs in 501 * the body of the code. 502 * COPYIN(uap->addr, &r.reg, sizeof r.reg); 503 * becomes either: 504 * copyin(uap->addr, &r.reg, sizeof r.reg); 505 * or 506 * copyin(uap->addr, &r.reg32, sizeof r.reg32); 507 * .. except this is done at runtime. 508 */ 509 #define COPYIN(u, k, s) wrap32 ? \ 510 copyin(u, k ## 32, s ## 32) : \ 511 copyin(u, k, s) 512 #define COPYOUT(k, u, s) wrap32 ? \ 513 copyout(k ## 32, u, s ## 32) : \ 514 copyout(k, u, s) 515 #else 516 #define COPYIN(u, k, s) copyin(u, k, s) 517 #define COPYOUT(k, u, s) copyout(k, u, s) 518 #endif 519 int 520 sys_ptrace(struct thread *td, struct ptrace_args *uap) 521 { 522 /* 523 * XXX this obfuscation is to reduce stack usage, but the register 524 * structs may be too large to put on the stack anyway. 525 */ 526 union { 527 struct ptrace_io_desc piod; 528 struct ptrace_lwpinfo pl; 529 struct ptrace_vm_entry pve; 530 struct dbreg dbreg; 531 struct fpreg fpreg; 532 struct reg reg; 533 #ifdef COMPAT_FREEBSD32 534 struct dbreg32 dbreg32; 535 struct fpreg32 fpreg32; 536 struct reg32 reg32; 537 struct ptrace_io_desc32 piod32; 538 struct ptrace_lwpinfo32 pl32; 539 struct ptrace_vm_entry32 pve32; 540 #endif 541 } r; 542 void *addr; 543 int error = 0; 544 #ifdef COMPAT_FREEBSD32 545 int wrap32 = 0; 546 547 if (SV_CURPROC_FLAG(SV_ILP32)) 548 wrap32 = 1; 549 #endif 550 AUDIT_ARG_PID(uap->pid); 551 AUDIT_ARG_CMD(uap->req); 552 AUDIT_ARG_VALUE(uap->data); 553 addr = &r; 554 switch (uap->req) { 555 case PT_GETREGS: 556 case PT_GETFPREGS: 557 case PT_GETDBREGS: 558 case PT_LWPINFO: 559 break; 560 case PT_SETREGS: 561 error = COPYIN(uap->addr, &r.reg, sizeof r.reg); 562 break; 563 case PT_SETFPREGS: 564 error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg); 565 break; 566 case PT_SETDBREGS: 567 error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg); 568 break; 569 case PT_IO: 570 error = COPYIN(uap->addr, &r.piod, sizeof r.piod); 571 break; 572 case PT_VM_ENTRY: 573 error = COPYIN(uap->addr, &r.pve, sizeof r.pve); 574 break; 575 default: 576 addr = uap->addr; 577 break; 578 } 579 if (error) 580 return (error); 581 582 error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data); 583 if (error) 584 return (error); 585 586 switch (uap->req) { 587 case PT_VM_ENTRY: 588 error = COPYOUT(&r.pve, uap->addr, sizeof r.pve); 589 break; 590 case PT_IO: 591 error = COPYOUT(&r.piod, uap->addr, sizeof r.piod); 592 break; 593 case PT_GETREGS: 594 error = COPYOUT(&r.reg, uap->addr, sizeof r.reg); 595 break; 596 case PT_GETFPREGS: 597 error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg); 598 break; 599 case PT_GETDBREGS: 600 error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg); 601 break; 602 case PT_LWPINFO: 603 error = copyout(&r.pl, uap->addr, uap->data); 604 break; 605 } 606 607 return (error); 608 } 609 #undef COPYIN 610 #undef COPYOUT 611 612 #ifdef COMPAT_FREEBSD32 613 /* 614 * PROC_READ(regs, td2, addr); 615 * becomes either: 616 * proc_read_regs(td2, addr); 617 * or 618 * proc_read_regs32(td2, addr); 619 * .. except this is done at runtime. There is an additional 620 * complication in that PROC_WRITE disallows 32 bit consumers 621 * from writing to 64 bit address space targets. 622 */ 623 #define PROC_READ(w, t, a) wrap32 ? \ 624 proc_read_ ## w ## 32(t, a) : \ 625 proc_read_ ## w (t, a) 626 #define PROC_WRITE(w, t, a) wrap32 ? \ 627 (safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \ 628 proc_write_ ## w (t, a) 629 #else 630 #define PROC_READ(w, t, a) proc_read_ ## w (t, a) 631 #define PROC_WRITE(w, t, a) proc_write_ ## w (t, a) 632 #endif 633 634 int 635 kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data) 636 { 637 struct iovec iov; 638 struct uio uio; 639 struct proc *curp, *p, *pp; 640 struct thread *td2 = NULL, *td3; 641 struct ptrace_io_desc *piod = NULL; 642 struct ptrace_lwpinfo *pl; 643 int error, write, tmp, num; 644 int proctree_locked = 0; 645 lwpid_t tid = 0, *buf; 646 #ifdef COMPAT_FREEBSD32 647 int wrap32 = 0, safe = 0; 648 struct ptrace_io_desc32 *piod32 = NULL; 649 struct ptrace_lwpinfo32 *pl32 = NULL; 650 struct ptrace_lwpinfo plr; 651 #endif 652 653 curp = td->td_proc; 654 655 /* Lock proctree before locking the process. */ 656 switch (req) { 657 case PT_TRACE_ME: 658 case PT_ATTACH: 659 case PT_STEP: 660 case PT_CONTINUE: 661 case PT_TO_SCE: 662 case PT_TO_SCX: 663 case PT_SYSCALL: 664 case PT_FOLLOW_FORK: 665 case PT_DETACH: 666 sx_xlock(&proctree_lock); 667 proctree_locked = 1; 668 break; 669 default: 670 break; 671 } 672 673 write = 0; 674 if (req == PT_TRACE_ME) { 675 p = td->td_proc; 676 PROC_LOCK(p); 677 } else { 678 if (pid <= PID_MAX) { 679 if ((p = pfind(pid)) == NULL) { 680 if (proctree_locked) 681 sx_xunlock(&proctree_lock); 682 return (ESRCH); 683 } 684 } else { 685 td2 = tdfind(pid, -1); 686 if (td2 == NULL) { 687 if (proctree_locked) 688 sx_xunlock(&proctree_lock); 689 return (ESRCH); 690 } 691 p = td2->td_proc; 692 tid = pid; 693 pid = p->p_pid; 694 } 695 } 696 AUDIT_ARG_PROCESS(p); 697 698 if ((p->p_flag & P_WEXIT) != 0) { 699 error = ESRCH; 700 goto fail; 701 } 702 if ((error = p_cansee(td, p)) != 0) 703 goto fail; 704 705 if ((error = p_candebug(td, p)) != 0) 706 goto fail; 707 708 /* 709 * System processes can't be debugged. 710 */ 711 if ((p->p_flag & P_SYSTEM) != 0) { 712 error = EINVAL; 713 goto fail; 714 } 715 716 if (tid == 0) { 717 if ((p->p_flag & P_STOPPED_TRACE) != 0) { 718 KASSERT(p->p_xthread != NULL, ("NULL p_xthread")); 719 td2 = p->p_xthread; 720 } else { 721 td2 = FIRST_THREAD_IN_PROC(p); 722 } 723 tid = td2->td_tid; 724 } 725 726 #ifdef COMPAT_FREEBSD32 727 /* 728 * Test if we're a 32 bit client and what the target is. 729 * Set the wrap controls accordingly. 730 */ 731 if (SV_CURPROC_FLAG(SV_ILP32)) { 732 if (SV_PROC_FLAG(td2->td_proc, SV_ILP32)) 733 safe = 1; 734 wrap32 = 1; 735 } 736 #endif 737 /* 738 * Permissions check 739 */ 740 switch (req) { 741 case PT_TRACE_ME: 742 /* Always legal. */ 743 break; 744 745 case PT_ATTACH: 746 /* Self */ 747 if (p->p_pid == td->td_proc->p_pid) { 748 error = EINVAL; 749 goto fail; 750 } 751 752 /* Already traced */ 753 if (p->p_flag & P_TRACED) { 754 error = EBUSY; 755 goto fail; 756 } 757 758 /* Can't trace an ancestor if you're being traced. */ 759 if (curp->p_flag & P_TRACED) { 760 for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) { 761 if (pp == p) { 762 error = EINVAL; 763 goto fail; 764 } 765 } 766 } 767 768 769 /* OK */ 770 break; 771 772 case PT_CLEARSTEP: 773 /* Allow thread to clear single step for itself */ 774 if (td->td_tid == tid) 775 break; 776 777 /* FALLTHROUGH */ 778 default: 779 /* not being traced... */ 780 if ((p->p_flag & P_TRACED) == 0) { 781 error = EPERM; 782 goto fail; 783 } 784 785 /* not being traced by YOU */ 786 if (p->p_pptr != td->td_proc) { 787 error = EBUSY; 788 goto fail; 789 } 790 791 /* not currently stopped */ 792 if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 || 793 p->p_suspcount != p->p_numthreads || 794 (p->p_flag & P_WAITED) == 0) { 795 error = EBUSY; 796 goto fail; 797 } 798 799 if ((p->p_flag & P_STOPPED_TRACE) == 0) { 800 static int count = 0; 801 if (count++ == 0) 802 printf("P_STOPPED_TRACE not set.\n"); 803 } 804 805 /* OK */ 806 break; 807 } 808 809 /* Keep this process around until we finish this request. */ 810 _PHOLD(p); 811 812 #ifdef FIX_SSTEP 813 /* 814 * Single step fixup ala procfs 815 */ 816 FIX_SSTEP(td2); 817 #endif 818 819 /* 820 * Actually do the requests 821 */ 822 823 td->td_retval[0] = 0; 824 825 switch (req) { 826 case PT_TRACE_ME: 827 /* set my trace flag and "owner" so it can read/write me */ 828 p->p_flag |= P_TRACED; 829 if (p->p_flag & P_PPWAIT) 830 p->p_flag |= P_PPTRACE; 831 p->p_oppid = p->p_pptr->p_pid; 832 CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid); 833 break; 834 835 case PT_ATTACH: 836 /* security check done above */ 837 /* 838 * It would be nice if the tracing relationship was separate 839 * from the parent relationship but that would require 840 * another set of links in the proc struct or for "wait" 841 * to scan the entire proc table. To make life easier, 842 * we just re-parent the process we're trying to trace. 843 * The old parent is remembered so we can put things back 844 * on a "detach". 845 */ 846 p->p_flag |= P_TRACED; 847 p->p_oppid = p->p_pptr->p_pid; 848 if (p->p_pptr != td->td_proc) { 849 proc_reparent(p, td->td_proc); 850 } 851 data = SIGSTOP; 852 CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid, 853 p->p_oppid); 854 goto sendsig; /* in PT_CONTINUE below */ 855 856 case PT_CLEARSTEP: 857 CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid, 858 p->p_pid); 859 error = ptrace_clear_single_step(td2); 860 break; 861 862 case PT_SETSTEP: 863 CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid, 864 p->p_pid); 865 error = ptrace_single_step(td2); 866 break; 867 868 case PT_SUSPEND: 869 CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid, 870 p->p_pid); 871 td2->td_dbgflags |= TDB_SUSPEND; 872 thread_lock(td2); 873 td2->td_flags |= TDF_NEEDSUSPCHK; 874 thread_unlock(td2); 875 break; 876 877 case PT_RESUME: 878 CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid, 879 p->p_pid); 880 td2->td_dbgflags &= ~TDB_SUSPEND; 881 break; 882 883 case PT_FOLLOW_FORK: 884 CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid, 885 p->p_flag & P_FOLLOWFORK ? "enabled" : "disabled", 886 data ? "enabled" : "disabled"); 887 if (data) 888 p->p_flag |= P_FOLLOWFORK; 889 else 890 p->p_flag &= ~P_FOLLOWFORK; 891 break; 892 893 case PT_STEP: 894 case PT_CONTINUE: 895 case PT_TO_SCE: 896 case PT_TO_SCX: 897 case PT_SYSCALL: 898 case PT_DETACH: 899 /* Zero means do not send any signal */ 900 if (data < 0 || data > _SIG_MAXSIG) { 901 error = EINVAL; 902 break; 903 } 904 905 switch (req) { 906 case PT_STEP: 907 CTR2(KTR_PTRACE, "PT_STEP: tid %d (pid %d)", 908 td2->td_tid, p->p_pid); 909 error = ptrace_single_step(td2); 910 if (error) 911 goto out; 912 break; 913 case PT_CONTINUE: 914 case PT_TO_SCE: 915 case PT_TO_SCX: 916 case PT_SYSCALL: 917 if (addr != (void *)1) { 918 error = ptrace_set_pc(td2, 919 (u_long)(uintfptr_t)addr); 920 if (error) 921 goto out; 922 } 923 switch (req) { 924 case PT_TO_SCE: 925 p->p_stops |= S_PT_SCE; 926 CTR2(KTR_PTRACE, 927 "PT_TO_SCE: pid %d, stops = %#x", p->p_pid, 928 p->p_stops); 929 break; 930 case PT_TO_SCX: 931 p->p_stops |= S_PT_SCX; 932 CTR2(KTR_PTRACE, 933 "PT_TO_SCX: pid %d, stops = %#x", p->p_pid, 934 p->p_stops); 935 break; 936 case PT_SYSCALL: 937 p->p_stops |= S_PT_SCE | S_PT_SCX; 938 CTR2(KTR_PTRACE, 939 "PT_SYSCALL: pid %d, stops = %#x", p->p_pid, 940 p->p_stops); 941 break; 942 case PT_CONTINUE: 943 CTR1(KTR_PTRACE, 944 "PT_CONTINUE: pid %d", p->p_pid); 945 break; 946 } 947 break; 948 case PT_DETACH: 949 /* reset process parent */ 950 if (p->p_oppid != p->p_pptr->p_pid) { 951 PROC_LOCK(p->p_pptr); 952 sigqueue_take(p->p_ksi); 953 PROC_UNLOCK(p->p_pptr); 954 955 pp = proc_realparent(p); 956 proc_reparent(p, pp); 957 if (pp == initproc) 958 p->p_sigparent = SIGCHLD; 959 CTR2(KTR_PTRACE, 960 "PT_DETACH: pid %d reparented to pid %d", 961 p->p_pid, pp->p_pid); 962 } else 963 CTR1(KTR_PTRACE, "PT_DETACH: pid %d", p->p_pid); 964 p->p_oppid = 0; 965 p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK); 966 p->p_stops = 0; 967 968 /* should we send SIGCHLD? */ 969 /* childproc_continued(p); */ 970 break; 971 } 972 973 sendsig: 974 if (proctree_locked) { 975 sx_xunlock(&proctree_lock); 976 proctree_locked = 0; 977 } 978 p->p_xstat = data; 979 p->p_xthread = NULL; 980 if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) { 981 /* deliver or queue signal */ 982 td2->td_dbgflags &= ~TDB_XSIG; 983 td2->td_xsig = data; 984 985 if (req == PT_DETACH) { 986 FOREACH_THREAD_IN_PROC(p, td3) 987 td3->td_dbgflags &= ~TDB_SUSPEND; 988 } 989 /* 990 * unsuspend all threads, to not let a thread run, 991 * you should use PT_SUSPEND to suspend it before 992 * continuing process. 993 */ 994 PROC_SLOCK(p); 995 p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED); 996 thread_unsuspend(p); 997 PROC_SUNLOCK(p); 998 if (req == PT_ATTACH) 999 kern_psignal(p, data); 1000 } else { 1001 if (data) 1002 kern_psignal(p, data); 1003 } 1004 break; 1005 1006 case PT_WRITE_I: 1007 case PT_WRITE_D: 1008 td2->td_dbgflags |= TDB_USERWR; 1009 write = 1; 1010 /* FALLTHROUGH */ 1011 case PT_READ_I: 1012 case PT_READ_D: 1013 PROC_UNLOCK(p); 1014 tmp = 0; 1015 /* write = 0 set above */ 1016 iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp; 1017 iov.iov_len = sizeof(int); 1018 uio.uio_iov = &iov; 1019 uio.uio_iovcnt = 1; 1020 uio.uio_offset = (off_t)(uintptr_t)addr; 1021 uio.uio_resid = sizeof(int); 1022 uio.uio_segflg = UIO_SYSSPACE; /* i.e.: the uap */ 1023 uio.uio_rw = write ? UIO_WRITE : UIO_READ; 1024 uio.uio_td = td; 1025 error = proc_rwmem(p, &uio); 1026 if (uio.uio_resid != 0) { 1027 /* 1028 * XXX proc_rwmem() doesn't currently return ENOSPC, 1029 * so I think write() can bogusly return 0. 1030 * XXX what happens for short writes? We don't want 1031 * to write partial data. 1032 * XXX proc_rwmem() returns EPERM for other invalid 1033 * addresses. Convert this to EINVAL. Does this 1034 * clobber returns of EPERM for other reasons? 1035 */ 1036 if (error == 0 || error == ENOSPC || error == EPERM) 1037 error = EINVAL; /* EOF */ 1038 } 1039 if (!write) 1040 td->td_retval[0] = tmp; 1041 if (error == 0) { 1042 if (write) 1043 CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x", 1044 p->p_pid, addr, data); 1045 else 1046 CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x", 1047 p->p_pid, addr, tmp); 1048 } 1049 PROC_LOCK(p); 1050 break; 1051 1052 case PT_IO: 1053 #ifdef COMPAT_FREEBSD32 1054 if (wrap32) { 1055 piod32 = addr; 1056 iov.iov_base = (void *)(uintptr_t)piod32->piod_addr; 1057 iov.iov_len = piod32->piod_len; 1058 uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs; 1059 uio.uio_resid = piod32->piod_len; 1060 } else 1061 #endif 1062 { 1063 piod = addr; 1064 iov.iov_base = piod->piod_addr; 1065 iov.iov_len = piod->piod_len; 1066 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs; 1067 uio.uio_resid = piod->piod_len; 1068 } 1069 uio.uio_iov = &iov; 1070 uio.uio_iovcnt = 1; 1071 uio.uio_segflg = UIO_USERSPACE; 1072 uio.uio_td = td; 1073 #ifdef COMPAT_FREEBSD32 1074 tmp = wrap32 ? piod32->piod_op : piod->piod_op; 1075 #else 1076 tmp = piod->piod_op; 1077 #endif 1078 switch (tmp) { 1079 case PIOD_READ_D: 1080 case PIOD_READ_I: 1081 CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)", 1082 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1083 uio.uio_rw = UIO_READ; 1084 break; 1085 case PIOD_WRITE_D: 1086 case PIOD_WRITE_I: 1087 CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)", 1088 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1089 td2->td_dbgflags |= TDB_USERWR; 1090 uio.uio_rw = UIO_WRITE; 1091 break; 1092 default: 1093 error = EINVAL; 1094 goto out; 1095 } 1096 PROC_UNLOCK(p); 1097 error = proc_rwmem(p, &uio); 1098 #ifdef COMPAT_FREEBSD32 1099 if (wrap32) 1100 piod32->piod_len -= uio.uio_resid; 1101 else 1102 #endif 1103 piod->piod_len -= uio.uio_resid; 1104 PROC_LOCK(p); 1105 break; 1106 1107 case PT_KILL: 1108 CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid); 1109 data = SIGKILL; 1110 goto sendsig; /* in PT_CONTINUE above */ 1111 1112 case PT_SETREGS: 1113 CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid, 1114 p->p_pid); 1115 td2->td_dbgflags |= TDB_USERWR; 1116 error = PROC_WRITE(regs, td2, addr); 1117 break; 1118 1119 case PT_GETREGS: 1120 CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid, 1121 p->p_pid); 1122 error = PROC_READ(regs, td2, addr); 1123 break; 1124 1125 case PT_SETFPREGS: 1126 CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid, 1127 p->p_pid); 1128 td2->td_dbgflags |= TDB_USERWR; 1129 error = PROC_WRITE(fpregs, td2, addr); 1130 break; 1131 1132 case PT_GETFPREGS: 1133 CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid, 1134 p->p_pid); 1135 error = PROC_READ(fpregs, td2, addr); 1136 break; 1137 1138 case PT_SETDBREGS: 1139 CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid, 1140 p->p_pid); 1141 td2->td_dbgflags |= TDB_USERWR; 1142 error = PROC_WRITE(dbregs, td2, addr); 1143 break; 1144 1145 case PT_GETDBREGS: 1146 CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid, 1147 p->p_pid); 1148 error = PROC_READ(dbregs, td2, addr); 1149 break; 1150 1151 case PT_LWPINFO: 1152 if (data <= 0 || 1153 #ifdef COMPAT_FREEBSD32 1154 (!wrap32 && data > sizeof(*pl)) || 1155 (wrap32 && data > sizeof(*pl32))) { 1156 #else 1157 data > sizeof(*pl)) { 1158 #endif 1159 error = EINVAL; 1160 break; 1161 } 1162 #ifdef COMPAT_FREEBSD32 1163 if (wrap32) { 1164 pl = &plr; 1165 pl32 = addr; 1166 } else 1167 #endif 1168 pl = addr; 1169 pl->pl_lwpid = td2->td_tid; 1170 pl->pl_event = PL_EVENT_NONE; 1171 pl->pl_flags = 0; 1172 if (td2->td_dbgflags & TDB_XSIG) { 1173 pl->pl_event = PL_EVENT_SIGNAL; 1174 if (td2->td_dbgksi.ksi_signo != 0 && 1175 #ifdef COMPAT_FREEBSD32 1176 ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo, 1177 pl_siginfo) + sizeof(pl->pl_siginfo)) || 1178 (wrap32 && data >= offsetof(struct ptrace_lwpinfo32, 1179 pl_siginfo) + sizeof(struct siginfo32))) 1180 #else 1181 data >= offsetof(struct ptrace_lwpinfo, pl_siginfo) 1182 + sizeof(pl->pl_siginfo) 1183 #endif 1184 ){ 1185 pl->pl_flags |= PL_FLAG_SI; 1186 pl->pl_siginfo = td2->td_dbgksi.ksi_info; 1187 } 1188 } 1189 if ((pl->pl_flags & PL_FLAG_SI) == 0) 1190 bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo)); 1191 if (td2->td_dbgflags & TDB_SCE) 1192 pl->pl_flags |= PL_FLAG_SCE; 1193 else if (td2->td_dbgflags & TDB_SCX) 1194 pl->pl_flags |= PL_FLAG_SCX; 1195 if (td2->td_dbgflags & TDB_EXEC) 1196 pl->pl_flags |= PL_FLAG_EXEC; 1197 if (td2->td_dbgflags & TDB_FORK) { 1198 pl->pl_flags |= PL_FLAG_FORKED; 1199 pl->pl_child_pid = td2->td_dbg_forked; 1200 } 1201 if (td2->td_dbgflags & TDB_CHILD) 1202 pl->pl_flags |= PL_FLAG_CHILD; 1203 pl->pl_sigmask = td2->td_sigmask; 1204 pl->pl_siglist = td2->td_siglist; 1205 strcpy(pl->pl_tdname, td2->td_name); 1206 #ifdef COMPAT_FREEBSD32 1207 if (wrap32) 1208 ptrace_lwpinfo_to32(pl, pl32); 1209 #endif 1210 CTR5(KTR_PTRACE, 1211 "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d", 1212 td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags, 1213 pl->pl_child_pid); 1214 break; 1215 1216 case PT_GETNUMLWPS: 1217 CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid, 1218 p->p_numthreads); 1219 td->td_retval[0] = p->p_numthreads; 1220 break; 1221 1222 case PT_GETLWPLIST: 1223 CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d", 1224 p->p_pid, data, p->p_numthreads); 1225 if (data <= 0) { 1226 error = EINVAL; 1227 break; 1228 } 1229 num = imin(p->p_numthreads, data); 1230 PROC_UNLOCK(p); 1231 buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK); 1232 tmp = 0; 1233 PROC_LOCK(p); 1234 FOREACH_THREAD_IN_PROC(p, td2) { 1235 if (tmp >= num) 1236 break; 1237 buf[tmp++] = td2->td_tid; 1238 } 1239 PROC_UNLOCK(p); 1240 error = copyout(buf, addr, tmp * sizeof(lwpid_t)); 1241 free(buf, M_TEMP); 1242 if (!error) 1243 td->td_retval[0] = tmp; 1244 PROC_LOCK(p); 1245 break; 1246 1247 case PT_VM_TIMESTAMP: 1248 CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d", 1249 p->p_pid, p->p_vmspace->vm_map.timestamp); 1250 td->td_retval[0] = p->p_vmspace->vm_map.timestamp; 1251 break; 1252 1253 case PT_VM_ENTRY: 1254 PROC_UNLOCK(p); 1255 #ifdef COMPAT_FREEBSD32 1256 if (wrap32) 1257 error = ptrace_vm_entry32(td, p, addr); 1258 else 1259 #endif 1260 error = ptrace_vm_entry(td, p, addr); 1261 PROC_LOCK(p); 1262 break; 1263 1264 default: 1265 #ifdef __HAVE_PTRACE_MACHDEP 1266 if (req >= PT_FIRSTMACH) { 1267 PROC_UNLOCK(p); 1268 error = cpu_ptrace(td2, req, addr, data); 1269 PROC_LOCK(p); 1270 } else 1271 #endif 1272 /* Unknown request. */ 1273 error = EINVAL; 1274 break; 1275 } 1276 1277 out: 1278 /* Drop our hold on this process now that the request has completed. */ 1279 _PRELE(p); 1280 fail: 1281 PROC_UNLOCK(p); 1282 if (proctree_locked) 1283 sx_xunlock(&proctree_lock); 1284 return (error); 1285 } 1286 #undef PROC_READ 1287 #undef PROC_WRITE 1288 1289 /* 1290 * Stop a process because of a debugging event; 1291 * stay stopped until p->p_step is cleared 1292 * (cleared by PIOCCONT in procfs). 1293 */ 1294 void 1295 stopevent(struct proc *p, unsigned int event, unsigned int val) 1296 { 1297 1298 PROC_LOCK_ASSERT(p, MA_OWNED); 1299 p->p_step = 1; 1300 CTR3(KTR_PTRACE, "stopevent: pid %d event %u val %u", p->p_pid, event, 1301 val); 1302 do { 1303 p->p_xstat = val; 1304 p->p_xthread = NULL; 1305 p->p_stype = event; /* Which event caused the stop? */ 1306 wakeup(&p->p_stype); /* Wake up any PIOCWAIT'ing procs */ 1307 msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0); 1308 } while (p->p_step); 1309 } 1310