/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#include <compat/freebsd32/freebsd32_signal.h>

struct ptrace_io_desc32 {
	int		piod_op;
	uint32_t	piod_offs;
	uint32_t	piod_addr;
	uint32_t	piod_len;
};

struct ptrace_vm_entry32 {
	int		pve_entry;
	int		pve_timestamp;
	uint32_t	pve_start;
	uint32_t	pve_end;
	uint32_t	pve_offset;
	u_int		pve_prot;
	u_int		pve_pathlen;
	int32_t		pve_fileid;
	u_int		pve_fsid;
	uint32_t	pve_path;
};

struct ptrace_lwpinfo32 {
	lwpid_t	pl_lwpid;		/* LWP described. */
	int	pl_event;		/* Event that stopped the LWP. */
	int	pl_flags;		/* LWP flags. */
	sigset_t	pl_sigmask;	/* LWP signal mask */
	sigset_t	pl_siglist;	/* LWP pending signal */
	struct siginfo32 pl_siginfo;	/* siginfo for signal */
	char	pl_tdname[MAXCOMLEN + 1]; /* LWP name. */
	int	pl_child_pid;		/* New child pid */
};

#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do { \
	int error; \
 \
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \
	if ((td->td_proc->p_flag & P_INMEM) == 0) \
		error = EIO; \
	else \
		error = (action); \
	return (error); \
} while(0)

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

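/*
 * Descriptive note (added): copy data to or from the address space of
 * process "p", as described by "uio".  Pages are faulted in and held one
 * at a time, and the caller must hold the process (see the KASSERT below)
 * so it cannot exit while the transfer is in progress.
 */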
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, page_offset, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	/*
	 * If we are writing, then we request vm_fault() to create a private
	 * copy of each page.  Since these copies will not be writeable by the
	 * process, we must explicitly request that they be dirtied.
	 */
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		u_int len;
		vm_page_t m;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault and hold the page on behalf of the process.
		 */
		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
		if (error != KERN_SUCCESS) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (writing && error == 0) {
			vm_map_lock_read(map);
			if (vm_map_check_protection(map, pageno, pageno +
			    PAGE_SIZE, VM_PROT_EXECUTE))
				vm_sync_icache(map, uva, len);
			vm_map_unlock_read(map);
		}

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

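/*
 * Descriptive note (added): fill in "pve" with information about the
 * pve_entry'th entry of the traced process's VM map, skipping submap
 * entries, and optionally copy the backing vnode's path out to pve_path.
 * This is the backend for the PT_VM_ENTRY request below.
 */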
static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		entry = map->header.next;
		index = 0;
		while (index < pve->pve_entry && entry != &map->header) {
			entry = entry->next;
			index++;
		}
		if (index != pve->pve_entry) {
			error = EINVAL;
			break;
		}
		while (entry != &map->header &&
		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
			entry = entry->next;
			index++;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_RLOCK(obj);
	} while (0);

	vm_map_unlock_read(map);
	vmspace_free(vm);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_RLOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = (lobj->type == OBJT_VNODE) ? lobj->handle : NULL;
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_RUNLOCK(lobj);
		VM_OBJECT_RUNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			vn_fullpath(td, vp, &fullpath, &freepath);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}

	return (error);
}

#ifdef COMPAT_FREEBSD32
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}

static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

	pl32->pl_lwpid = pl->pl_lwpid;
	pl32->pl_event = pl->pl_event;
	pl32->pl_flags = pl->pl_flags;
	pl32->pl_sigmask = pl->pl_sigmask;
	pl32->pl_siglist = pl->pl_siglist;
	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
	strcpy(pl32->pl_tdname, pl->pl_tdname);
	pl32->pl_child_pid = pl->pl_child_pid;
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

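/*
 * Illustrative note (added): a typical debugger-side sequence for these
 * requests, as seen from userland, looks roughly like the following.
 * This is a sketch only; error handling and the usual <sys/ptrace.h>
 * and <sys/wait.h> boilerplate are omitted:
 *
 *	struct reg regs;
 *
 *	ptrace(PT_ATTACH, pid, NULL, 0);	  stop the target
 *	waitpid(pid, &status, 0);		  wait for it to stop
 *	ptrace(PT_GETREGS, pid, (caddr_t)&regs, 0);
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);	  resume where stopped,
 *						  deliver no signal
 */
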
#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif

int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_lwpinfo32 pl32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_FREEBSD32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

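/*
 * Descriptive note (added): kern_ptrace() is the in-kernel body of
 * ptrace(2).  For requests that take a structure, "addr" points at a
 * kernel copy that sys_ptrace() has already copied in (and will copy
 * back out); for all other requests it is the raw user address passed
 * by the caller.
 */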
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL, *td3;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
	struct ptrace_lwpinfo32 *pl32 = NULL;
	struct ptrace_lwpinfo plr;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_FOLLOW_FORK:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			td2 = tdfind(pid, -1);
			if (td2 == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			p = td2->td_proc;
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		if (p->p_flag & P_PPWAIT)
			p->p_flag |= P_PPTRACE;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		/*
		 * It would be nice if the tracing relationship was separate
		 * from the parent relationship but that would require
		 * another set of links in the proc struct or for "wait"
		 * to scan the entire proc table.  To make life easier,
		 * we just re-parent the process we're trying to trace.
		 * The old parent is remembered so we can put things back
		 * on a "detach".
		 */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc) {
			proc_reparent(p, td->td_proc);
		}
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_FOLLOW_FORK:
		if (data)
			p->p_flag |= P_FOLLOWFORK;
		else
			p->p_flag &= ~P_FOLLOWFORK;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_stops |= S_PT_SCE;
				break;
			case PT_TO_SCX:
				p->p_stops |= S_PT_SCX;
				break;
			case PT_SYSCALL:
				p->p_stops |= S_PT_SCE | S_PT_SCX;
				break;
			}
			break;
		case PT_DETACH:
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				pp = proc_realparent(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_oppid = 0;
			p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK);

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_dbgflags &= ~TDB_SUSPEND;
			}
			/*
			 * Unsuspend all threads.  To keep a thread from
			 * running, use PT_SUSPEND to suspend it before
			 * continuing the process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
			if (req == PT_ATTACH)
				kern_psignal(p, data);
		} else {
			if (data)
				kern_psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
		    (!wrap32 && data > sizeof(*pl)) ||
		    (wrap32 && data > sizeof(*pl32))) {
#else
		    data > sizeof(*pl)) {
#endif
			error = EINVAL;
			break;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			pl = &plr;
			pl32 = addr;
		} else
#endif
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		pl->pl_event = PL_EVENT_NONE;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			if (td2->td_dbgksi.ksi_signo != 0 &&
#ifdef COMPAT_FREEBSD32
			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
			    pl_siginfo) + sizeof(struct siginfo32)))
#else
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)
#endif
			){
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_dbgksi.ksi_info;
			}
		}
		if ((pl->pl_flags & PL_FLAG_SI) == 0)
			bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		if (td2->td_dbgflags & TDB_FORK) {
			pl->pl_flags |= PL_FLAG_FORKED;
			pl->pl_child_pid = td2->td_dbg_forked;
		}
		if (td2->td_dbgflags & TDB_CHILD)
			pl->pl_flags |= PL_FLAG_CHILD;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		strcpy(pl->pl_tdname, td2->td_name);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			ptrace_lwpinfo_to32(pl, pl32);
#endif
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
		error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}

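/*
 * Descriptive note (added): the helpers below implement the PROC_SPROTECT
 * command of procctl(2); they set or clear P_PROTECTED on a process and,
 * with PPROT_DESCEND, on its descendants as well.  From userland the
 * command is reached roughly like this (illustrative sketch only):
 *
 *	int flags = PPROT_SET | PPROT_DESCEND;
 *
 *	procctl(P_PID, (id_t)pid, PROC_SPROTECT, &flags);
 */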
static int
protect_setchild(struct thread *td, struct proc *p, int flags)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
		return (0);
	if (flags & PPROT_SET) {
		p->p_flag |= P_PROTECTED;
		if (flags & PPROT_INHERIT)
			p->p_flag2 |= P2_INHERIT_PROTECTED;
	} else {
		p->p_flag &= ~P_PROTECTED;
		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
	}
	return (1);
}

static int
protect_setchildren(struct thread *td, struct proc *top, int flags)
{
	struct proc *p;
	int ret;

	p = top;
	ret = 0;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= protect_setchild(td, p, flags);
		PROC_UNLOCK(p);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				PROC_LOCK(p);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
}

static int
protect_set(struct thread *td, struct proc *p, int flags)
{
	int error, ret;

	switch (PPROT_OP(flags)) {
	case PPROT_SET:
	case PPROT_CLEAR:
		break;
	default:
		return (EINVAL);
	}

	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
		return (EINVAL);

	error = priv_check(td, PRIV_VM_MADV_PROTECT);
	if (error)
		return (error);

	if (flags & PPROT_DESCEND)
		ret = protect_setchildren(td, p, flags);
	else
		ret = protect_setchild(td, p, flags);
	if (ret == 0)
		return (EPERM);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct procctl_args {
	idtype_t idtype;
	id_t	id;
	int	com;
	void	*data;
};
#endif
/* ARGSUSED */
int
sys_procctl(struct thread *td, struct procctl_args *uap)
{
	int error, flags;
	void *data;

	switch (uap->com) {
	case PROC_SPROTECT:
		error = copyin(uap->data, &flags, sizeof(flags));
		if (error)
			return (error);
		data = &flags;
		break;
	default:
		return (EINVAL);
	}

	return (kern_procctl(td, uap->idtype, uap->id, uap->com, data));
}

static int
kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	switch (com) {
	case PROC_SPROTECT:
		return (protect_set(td, p, *(int *)data));
	default:
		return (EINVAL);
	}
}

int
kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
{
	struct pgrp *pg;
	struct proc *p;
	int error, first_error, ok;

	sx_slock(&proctree_lock);
	switch (idtype) {
	case P_PID:
		p = pfind(id);
		if (p == NULL) {
			error = ESRCH;
			break;
		}
		error = p_cansee(td, p);
		if (error == 0)
			error = kern_procctl_single(td, p, com, data);
		PROC_UNLOCK(p);
		break;
	case P_PGID:
		/*
		 * Attempt to apply the operation to all members of the
		 * group.  Ignore processes in the group that can't be
		 * seen.  Ignore errors so long as at least one process is
		 * able to complete the request successfully.
		 */
		pg = pgfind(id);
		if (pg == NULL) {
			error = ESRCH;
			break;
		}
		PGRP_UNLOCK(pg);
		ok = 0;
		first_error = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			error = kern_procctl_single(td, p, com, data);
			PROC_UNLOCK(p);
			if (error == 0)
				ok = 1;
			else if (first_error == 0)
				first_error = error;
		}
		if (ok)
			error = 0;
		else if (first_error != 0)
			error = first_error;
		else
			/*
			 * Was not able to see any processes in the
			 * process group.
			 */
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}
	sx_sunlock(&proctree_lock);
	return (error);
}