/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#include <compat/freebsd32/freebsd32_signal.h>

struct ptrace_io_desc32 {
	int		piod_op;
	uint32_t	piod_offs;
	uint32_t	piod_addr;
	uint32_t	piod_len;
};

struct ptrace_vm_entry32 {
	int		pve_entry;
	int		pve_timestamp;
	uint32_t	pve_start;
	uint32_t	pve_end;
	uint32_t	pve_offset;
	u_int		pve_prot;
	u_int		pve_pathlen;
	int32_t		pve_fileid;
	u_int		pve_fsid;
	uint32_t	pve_path;
};

struct ptrace_lwpinfo32 {
	lwpid_t		pl_lwpid;	/* LWP described. */
	int		pl_event;	/* Event that stopped the LWP. */
	int		pl_flags;	/* LWP flags. */
	sigset_t	pl_sigmask;	/* LWP signal mask */
	sigset_t	pl_siglist;	/* LWP pending signal */
	struct siginfo32 pl_siginfo;	/* siginfo for signal */
};

#endif
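/*
 * The 32-bit descriptors above carry user addresses as plain uint32_t
 * fields (piod_offs, piod_addr, pve_path); the compat code below widens
 * them back to pointers with (void *)(uintptr_t) casts before use.
 */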
/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while (0)

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}
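/*
 * For reference, each PROC_ACTION() wrapper above expands to roughly the
 * following (shown for proc_read_regs()):
 *
 *	int error;
 *
 *	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 *	if ((td->td_proc->p_flag & P_INMEM) == 0)
 *		error = EIO;
 *	else
 *		error = fill_regs(td, regs);
 *	return (error);
 */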
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_object_t backing_object, object;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;	/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry and wired
		 * aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		if (writing && m != NULL) {
			vm_page_dirty(m);
			vm_pager_page_unswapped(m);
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock(m);
		vm_page_hold(m);
		vm_page_unlock(m);

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);
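		/*
		 * The page is still held, so it cannot be freed while
		 * uiomove_fromphys() below copies to or from user space,
		 * which may sleep.
		 */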
		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (!error && writing && (out_prot & VM_PROT_EXECUTE))
			vm_sync_icache(map, uva, len);

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
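/*
 * A minimal sketch of a proc_rwmem() consumer, mirroring what the
 * PT_READ_D case in kern_ptrace() below does.  "p", "va" and "val" are
 * hypothetical; the target process must already be held (see the KASSERT
 * above) and the copy here reads one int from user address "va":
 *
 *	struct iovec iov;
 *	struct uio uio;
 *	int error, val;
 *
 *	iov.iov_base = (caddr_t)&val;
 *	iov.iov_len = sizeof(int);
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_offset = (off_t)va;
 *	uio.uio_resid = sizeof(int);
 *	uio.uio_segflg = UIO_SYSSPACE;
 *	uio.uio_rw = UIO_READ;
 *	uio.uio_td = curthread;
 *	error = proc_rwmem(p, &uio);
 */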
static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index, vfslocked;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		entry = map->header.next;
		index = 0;
		while (index < pve->pve_entry && entry != &map->header) {
			entry = entry->next;
			index++;
		}
		if (index != pve->pve_entry) {
			error = EINVAL;
			break;
		}
		while (entry != &map->header &&
		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
			entry = entry->next;
			index++;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_LOCK(obj);
	} while (0);

	vm_map_unlock_read(map);
	vmspace_free(vm);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = (lobj->type == OBJT_VNODE) ? lobj->handle : NULL;
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_UNLOCK(lobj);
		VM_OBJECT_UNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			vn_fullpath(td, vp, &fullpath, &freepath);
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);
			VFS_UNLOCK_GIANT(vfslocked);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}

	return (error);
}

#ifdef COMPAT_FREEBSD32
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}

static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

	pl32->pl_lwpid = pl->pl_lwpid;
	pl32->pl_event = pl->pl_event;
	pl32->pl_flags = pl->pl_flags;
	pl32->pl_sigmask = pl->pl_sigmask;
	pl32->pl_siglist = pl->pl_siglist;
	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *	COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *	copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *	copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
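/*
 * Userland view, for reference: a debugger typically drives this syscall
 * as in the sketch below (error checking omitted, "pid", "addr", "status"
 * and "val" hypothetical), attaching, reading one word of the target's
 * data space and detaching without delivering a signal:
 *
 *	ptrace(PT_ATTACH, pid, NULL, 0);
 *	waitpid(pid, &status, 0);		// target stops with SIGSTOP
 *	val = ptrace(PT_READ_D, pid, (caddr_t)addr, 0);
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);
 */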
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_lwpinfo32 pl32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_FREEBSD32
/*
 *	PROC_READ(regs, td2, addr);
 * becomes either:
 *	proc_read_regs(td2, addr);
 * or
 *	proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
	struct ptrace_lwpinfo32 *pl32 = NULL;
	struct ptrace_lwpinfo plr;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
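	/*
	 * PT_ATTACH and PT_DETACH may reparent the target via
	 * proc_reparent(), which requires proctree_lock; it is taken here,
	 * before the process lock, to respect the lock order.
	 */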
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				if (td2 != NULL)
					break;	/* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (td2->td_proc->p_sysent->sv_flags & SV_ILP32)
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_stops |= S_PT_SCE;
				break;
			case PT_TO_SCX:
				p->p_stops |= S_PT_SCX;
				break;
			case PT_SYSCALL:
				p->p_stops |= S_PT_SCE | S_PT_SCX;
				break;
			}
			break;
		case PT_DETACH:
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3) {
					td3->td_dbgflags &= ~TDB_SUSPEND;
				}
			}
			/*
			 * Unsuspend all threads.  To keep a particular
			 * thread from running, the debugger should suspend
			 * it with PT_SUSPEND before continuing the process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		} else {
			if (data)
				psignal(p, data);
		}
		break;
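	/*
	 * PT_WRITE_I/D and PT_READ_I/D transfer a single word through
	 * proc_rwmem(); for reads the word comes back in td->td_retval[0],
	 * i.e. as the return value of ptrace(2).
	 */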
	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;
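	/*
	 * PT_LWPINFO takes the caller's structure size in "data", so a
	 * debugger built against a smaller struct ptrace_lwpinfo still
	 * works; only "data" bytes are copied back out, and pl_siginfo is
	 * only filled in when the given size covers it.
	 */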
	case PT_LWPINFO:
		if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
		    (!wrap32 && data > sizeof(*pl)) ||
		    (wrap32 && data > sizeof(*pl32))) {
#else
		    data > sizeof(*pl)) {
#endif
			error = EINVAL;
			break;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			pl = &plr;
			pl32 = addr;
		} else
#endif
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			if (td2->td_dbgksi.ksi_signo != 0 &&
#ifdef COMPAT_FREEBSD32
			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
			    pl_siginfo) + sizeof(struct siginfo32)))
#else
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)
#endif
			){
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_dbgksi.ksi_info;
			}
		}
		if ((pl->pl_flags & PL_FLAG_SI) == 0)
			bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			ptrace_lwpinfo_to32(pl, pl32);
#endif
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
			error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
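/*
 * stopevent() is normally reached through the STOPEVENT() macro
 * (<sys/signalvar.h>), which checks the event bit in p->p_stops and takes
 * the process lock around the call.
 */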