/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#include <compat/freebsd32/freebsd32_signal.h>

struct ptrace_io_desc32 {
	int		piod_op;
	uint32_t	piod_offs;
	uint32_t	piod_addr;
	uint32_t	piod_len;
};

struct ptrace_vm_entry32 {
	int		pve_entry;
	int		pve_timestamp;
	uint32_t	pve_start;
	uint32_t	pve_end;
	uint32_t	pve_offset;
	u_int		pve_prot;
	u_int		pve_pathlen;
	int32_t		pve_fileid;
	u_int		pve_fsid;
	uint32_t	pve_path;
};

struct ptrace_lwpinfo32 {
	lwpid_t		pl_lwpid;	/* LWP described. */
	int		pl_event;	/* Event that stopped the LWP. */
	int		pl_flags;	/* LWP flags. */
	sigset_t	pl_sigmask;	/* LWP signal mask */
	sigset_t	pl_siglist;	/* LWP pending signal */
	struct siginfo32 pl_siginfo;	/* siginfo for signal */
	char		pl_tdname[MAXCOMLEN + 1];	/* LWP name. */
	int		pl_child_pid;	/* New child pid */
};

#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)
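/*
 * Illustrative sketch only (not a prescribed interface): kern_ptrace()
 * below is the canonical consumer of these accessors.  It takes the
 * process lock and a hold reference on the (already stopped) process
 * before touching a thread's registers, roughly:
 *
 *	PROC_LOCK(p);
 *	_PHOLD(p);				keep p around for the request
 *	error = proc_read_regs(td2, &r.reg);	EIO if the process is swapped out
 *	_PRELE(p);
 *	PROC_UNLOCK(p);
 *
 * PROC_ACTION() asserts the process lock and rejects the request with
 * EIO when P_INMEM is clear.
 */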

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, page_offset, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	/*
	 * If we are writing, then we request vm_fault() to create a private
	 * copy of each page.  Since these copies will not be writeable by the
	 * process, we must explicitly request that they be dirtied.
	 */
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		u_int len;
		vm_page_t m;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault and hold the page on behalf of the process.
		 */
		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
		if (error != KERN_SUCCESS) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (writing && error == 0) {
			vm_map_lock_read(map);
			if (vm_map_check_protection(map, pageno, pageno +
			    PAGE_SIZE, VM_PROT_EXECUTE))
				vm_sync_icache(map, uva, len);
			vm_map_unlock_read(map);
		}

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
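/*
 * Minimal caller sketch (it mirrors what the PT_READ_I/PT_READ_D and
 * PT_WRITE_I/PT_WRITE_D cases in kern_ptrace() below actually do): read
 * one int from the target's address space at 'addr' into 'tmp'.  The
 * target process 'p' is assumed to be held (see _PHOLD()).
 *
 *	struct iovec iov;
 *	struct uio uio;
 *	int tmp = 0;
 *
 *	iov.iov_base = (caddr_t)&tmp;
 *	iov.iov_len = sizeof(int);
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_offset = (off_t)(uintptr_t)addr;	target VA to read
 *	uio.uio_resid = sizeof(int);
 *	uio.uio_segflg = UIO_SYSSPACE;			iov points into the kernel
 *	uio.uio_rw = UIO_READ;
 *	uio.uio_td = td;
 *	error = proc_rwmem(p, &uio);
 */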

static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		entry = map->header.next;
		index = 0;
		while (index < pve->pve_entry && entry != &map->header) {
			entry = entry->next;
			index++;
		}
		if (index != pve->pve_entry) {
			error = EINVAL;
			break;
		}
		while (entry != &map->header &&
		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
			entry = entry->next;
			index++;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_LOCK(obj);
	} while (0);

	vm_map_unlock_read(map);
	vmspace_free(vm);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = (lobj->type == OBJT_VNODE) ? lobj->handle : NULL;
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_UNLOCK(lobj);
		VM_OBJECT_UNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			vn_fullpath(td, vp, &fullpath, &freepath);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}

	return (error);
}
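/*
 * Usage sketch (userland, illustrative only): a debugger walks the target's
 * VM map by starting with pve_entry = 0 and re-issuing PT_VM_ENTRY until the
 * kernel runs out of entries and fails with ENOENT.  The path buffer is
 * optional; pass pve_pathlen = 0 to skip the backing object's path.
 *
 *	struct ptrace_vm_entry pve;
 *	char path[MAXPATHLEN];
 *
 *	memset(&pve, 0, sizeof(pve));
 *	for (;;) {
 *		pve.pve_path = path;
 *		pve.pve_pathlen = sizeof(path);
 *		if (ptrace(PT_VM_ENTRY, pid, (caddr_t)&pve, 0) == -1)
 *			break;				errno == ENOENT at the end
 *		printf("%#lx-%#lx %s\n", (u_long)pve.pve_start,
 *		    (u_long)pve.pve_end, pve.pve_pathlen != 0 ? path : "");
 *	}
 */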

#ifdef COMPAT_FREEBSD32
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}

static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

	pl32->pl_lwpid = pl->pl_lwpid;
	pl32->pl_event = pl->pl_event;
	pl32->pl_flags = pl->pl_flags;
	pl32->pl_sigmask = pl->pl_sigmask;
	pl32->pl_siglist = pl->pl_siglist;
	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
	strcpy(pl32->pl_tdname, pl->pl_tdname);
	pl32->pl_child_pid = pl->pl_child_pid;
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_lwpinfo32 pl32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT
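/*
 * Usage sketch (userland, illustrative only; error handling omitted and
 * 'pid' assumed valid): the classic attach/inspect/detach cycle that this
 * syscall implements looks roughly like:
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <machine/reg.h>
 *
 *	struct reg regs;
 *	int status;
 *
 *	ptrace(PT_ATTACH, pid, NULL, 0);	target is sent SIGSTOP
 *	waitpid(pid, &status, 0);		wait for it to stop
 *	ptrace(PT_GETREGS, pid, (caddr_t)&regs, 0);
 *	ptrace(PT_DETACH, pid, NULL, 0);	detach and let it resume
 */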

#ifdef COMPAT_FREEBSD32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL, *td3;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
	struct ptrace_lwpinfo32 *pl32 = NULL;
	struct ptrace_lwpinfo plr;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_FOLLOW_FORK:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			td2 = tdfind(pid, -1);
			if (td2 == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			p = td2->td_proc;
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		if (p->p_flag & P_PPWAIT)
			p->p_flag |= P_PPTRACE;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		/*
		 * It would be nice if the tracing relationship was separate
		 * from the parent relationship but that would require
		 * another set of links in the proc struct or for "wait"
		 * to scan the entire proc table.  To make life easier,
		 * we just re-parent the process we're trying to trace.
		 * The old parent is remembered so we can put things back
		 * on a "detach".
		 */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc) {
			proc_reparent(p, td->td_proc);
		}
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_FOLLOW_FORK:
		if (data)
			p->p_flag |= P_FOLLOWFORK;
		else
			p->p_flag &= ~P_FOLLOWFORK;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_stops |= S_PT_SCE;
				break;
			case PT_TO_SCX:
				p->p_stops |= S_PT_SCX;
				break;
			case PT_SYSCALL:
				p->p_stops |= S_PT_SCE | S_PT_SCX;
				break;
			}
			break;
		case PT_DETACH:
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_oppid = 0;
			p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK);

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_dbgflags &= ~TDB_SUSPEND;
			}
			/*
			 * Unsuspend all threads.  To keep a particular
			 * thread from running, use PT_SUSPEND to suspend
			 * it before continuing the process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
			if (req == PT_ATTACH)
				kern_psignal(p, data);
		} else {
			if (data)
				kern_psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
		    (!wrap32 && data > sizeof(*pl)) ||
		    (wrap32 && data > sizeof(*pl32))) {
#else
		    data > sizeof(*pl)) {
#endif
			error = EINVAL;
			break;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			pl = &plr;
			pl32 = addr;
		} else
#endif
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		pl->pl_event = PL_EVENT_NONE;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			if (td2->td_dbgksi.ksi_signo != 0 &&
#ifdef COMPAT_FREEBSD32
			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
			    pl_siginfo) + sizeof(struct siginfo32)))
#else
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)
#endif
			){
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_dbgksi.ksi_info;
			}
		}
		if ((pl->pl_flags & PL_FLAG_SI) == 0)
			bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		if (td2->td_dbgflags & TDB_FORK) {
			pl->pl_flags |= PL_FLAG_FORKED;
			pl->pl_child_pid = td2->td_dbg_forked;
		}
		if (td2->td_dbgflags & TDB_CHILD)
			pl->pl_flags |= PL_FLAG_CHILD;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		strcpy(pl->pl_tdname, td2->td_name);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			ptrace_lwpinfo_to32(pl, pl32);
#endif
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
			error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}