1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 1994, Sean Eric Fagan 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Sean Eric Fagan. 18 * 4. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/ktr.h> 40 #include <sys/limits.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/syscallsubr.h> 44 #include <sys/sysent.h> 45 #include <sys/sysproto.h> 46 #include <sys/priv.h> 47 #include <sys/proc.h> 48 #include <sys/vnode.h> 49 #include <sys/ptrace.h> 50 #include <sys/rwlock.h> 51 #include <sys/sx.h> 52 #include <sys/malloc.h> 53 #include <sys/signalvar.h> 54 #include <sys/caprights.h> 55 #include <sys/filedesc.h> 56 57 #include <machine/reg.h> 58 59 #include <security/audit/audit.h> 60 61 #include <vm/vm.h> 62 #include <vm/pmap.h> 63 #include <vm/vm_extern.h> 64 #include <vm/vm_map.h> 65 #include <vm/vm_kern.h> 66 #include <vm/vm_object.h> 67 #include <vm/vm_page.h> 68 #include <vm/vm_param.h> 69 70 #ifdef COMPAT_FREEBSD32 71 #include <sys/procfs.h> 72 #endif 73 74 /* 75 * Functions implemented using PROC_ACTION(): 76 * 77 * proc_read_regs(proc, regs) 78 * Get the current user-visible register set from the process 79 * and copy it into the regs structure (<machine/reg.h>). 80 * The process is stopped at the time read_regs is called. 81 * 82 * proc_write_regs(proc, regs) 83 * Update the current register set from the passed in regs 84 * structure. Take care to avoid clobbering special CPU 85 * registers or privileged bits in the PSL. 86 * Depending on the architecture this may have fix-up work to do, 87 * especially if the IAR or PCW are modified. 88 * The process is stopped at the time write_regs is called. 89 * 90 * proc_read_fpregs, proc_write_fpregs 91 * deal with the floating point register set, otherwise as above. 92 * 93 * proc_read_dbregs, proc_write_dbregs 94 * deal with the processor debug register set, otherwise as above. 95 * 96 * proc_sstep(proc) 97 * Arrange for the process to trap after executing a single instruction. 98 */ 99 100 #define PROC_ACTION(action) do { \ 101 int error; \ 102 \ 103 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \ 104 if ((td->td_proc->p_flag & P_INMEM) == 0) \ 105 error = EIO; \ 106 else \ 107 error = (action); \ 108 return (error); \ 109 } while (0) 110 111 int 112 proc_read_regs(struct thread *td, struct reg *regs) 113 { 114 115 PROC_ACTION(fill_regs(td, regs)); 116 } 117 118 int 119 proc_write_regs(struct thread *td, struct reg *regs) 120 { 121 122 PROC_ACTION(set_regs(td, regs)); 123 } 124 125 int 126 proc_read_dbregs(struct thread *td, struct dbreg *dbregs) 127 { 128 129 PROC_ACTION(fill_dbregs(td, dbregs)); 130 } 131 132 int 133 proc_write_dbregs(struct thread *td, struct dbreg *dbregs) 134 { 135 136 PROC_ACTION(set_dbregs(td, dbregs)); 137 } 138 139 /* 140 * Ptrace doesn't support fpregs at all, and there are no security holes 141 * or translations for fpregs, so we can just copy them. 142 */ 143 int 144 proc_read_fpregs(struct thread *td, struct fpreg *fpregs) 145 { 146 147 PROC_ACTION(fill_fpregs(td, fpregs)); 148 } 149 150 int 151 proc_write_fpregs(struct thread *td, struct fpreg *fpregs) 152 { 153 154 PROC_ACTION(set_fpregs(td, fpregs)); 155 } 156 157 #ifdef COMPAT_FREEBSD32 158 /* For 32 bit binaries, we need to expose the 32 bit regs layouts. */ 159 int 160 proc_read_regs32(struct thread *td, struct reg32 *regs32) 161 { 162 163 PROC_ACTION(fill_regs32(td, regs32)); 164 } 165 166 int 167 proc_write_regs32(struct thread *td, struct reg32 *regs32) 168 { 169 170 PROC_ACTION(set_regs32(td, regs32)); 171 } 172 173 int 174 proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 175 { 176 177 PROC_ACTION(fill_dbregs32(td, dbregs32)); 178 } 179 180 int 181 proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 182 { 183 184 PROC_ACTION(set_dbregs32(td, dbregs32)); 185 } 186 187 int 188 proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 189 { 190 191 PROC_ACTION(fill_fpregs32(td, fpregs32)); 192 } 193 194 int 195 proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 196 { 197 198 PROC_ACTION(set_fpregs32(td, fpregs32)); 199 } 200 #endif 201 202 int 203 proc_sstep(struct thread *td) 204 { 205 206 PROC_ACTION(ptrace_single_step(td)); 207 } 208 209 int 210 proc_rwmem(struct proc *p, struct uio *uio) 211 { 212 vm_map_t map; 213 vm_offset_t pageno; /* page number */ 214 vm_prot_t reqprot; 215 int error, fault_flags, page_offset, writing; 216 217 /* 218 * Assert that someone has locked this vmspace. (Should be 219 * curthread but we can't assert that.) This keeps the process 220 * from exiting out from under us until this operation completes. 221 */ 222 PROC_ASSERT_HELD(p); 223 PROC_LOCK_ASSERT(p, MA_NOTOWNED); 224 225 /* 226 * The map we want... 227 */ 228 map = &p->p_vmspace->vm_map; 229 230 /* 231 * If we are writing, then we request vm_fault() to create a private 232 * copy of each page. Since these copies will not be writeable by the 233 * process, we must explicity request that they be dirtied. 234 */ 235 writing = uio->uio_rw == UIO_WRITE; 236 reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ; 237 fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL; 238 239 /* 240 * Only map in one page at a time. We don't have to, but it 241 * makes things easier. This way is trivial - right? 242 */ 243 do { 244 vm_offset_t uva; 245 u_int len; 246 vm_page_t m; 247 248 uva = (vm_offset_t)uio->uio_offset; 249 250 /* 251 * Get the page number of this segment. 252 */ 253 pageno = trunc_page(uva); 254 page_offset = uva - pageno; 255 256 /* 257 * How many bytes to copy 258 */ 259 len = min(PAGE_SIZE - page_offset, uio->uio_resid); 260 261 /* 262 * Fault and hold the page on behalf of the process. 263 */ 264 error = vm_fault(map, pageno, reqprot, fault_flags, &m); 265 if (error != KERN_SUCCESS) { 266 if (error == KERN_RESOURCE_SHORTAGE) 267 error = ENOMEM; 268 else 269 error = EFAULT; 270 break; 271 } 272 273 /* 274 * Now do the i/o move. 275 */ 276 error = uiomove_fromphys(&m, page_offset, len, uio); 277 278 /* Make the I-cache coherent for breakpoints. */ 279 if (writing && error == 0) { 280 vm_map_lock_read(map); 281 if (vm_map_check_protection(map, pageno, pageno + 282 PAGE_SIZE, VM_PROT_EXECUTE)) 283 vm_sync_icache(map, uva, len); 284 vm_map_unlock_read(map); 285 } 286 287 /* 288 * Release the page. 289 */ 290 vm_page_unwire(m, PQ_ACTIVE); 291 292 } while (error == 0 && uio->uio_resid > 0); 293 294 return (error); 295 } 296 297 static ssize_t 298 proc_iop(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 299 size_t len, enum uio_rw rw) 300 { 301 struct iovec iov; 302 struct uio uio; 303 ssize_t slen; 304 305 MPASS(len < SSIZE_MAX); 306 slen = (ssize_t)len; 307 308 iov.iov_base = (caddr_t)buf; 309 iov.iov_len = len; 310 uio.uio_iov = &iov; 311 uio.uio_iovcnt = 1; 312 uio.uio_offset = va; 313 uio.uio_resid = slen; 314 uio.uio_segflg = UIO_SYSSPACE; 315 uio.uio_rw = rw; 316 uio.uio_td = td; 317 proc_rwmem(p, &uio); 318 if (uio.uio_resid == slen) 319 return (-1); 320 return (slen - uio.uio_resid); 321 } 322 323 ssize_t 324 proc_readmem(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 325 size_t len) 326 { 327 328 return (proc_iop(td, p, va, buf, len, UIO_READ)); 329 } 330 331 ssize_t 332 proc_writemem(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 333 size_t len) 334 { 335 336 return (proc_iop(td, p, va, buf, len, UIO_WRITE)); 337 } 338 339 static int 340 ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve) 341 { 342 struct vattr vattr; 343 vm_map_t map; 344 vm_map_entry_t entry; 345 vm_object_t obj, tobj, lobj; 346 struct vmspace *vm; 347 struct vnode *vp; 348 char *freepath, *fullpath; 349 u_int pathlen; 350 int error, index; 351 352 error = 0; 353 obj = NULL; 354 355 vm = vmspace_acquire_ref(p); 356 map = &vm->vm_map; 357 vm_map_lock_read(map); 358 359 do { 360 KASSERT((map->header.eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 361 ("Submap in map header")); 362 index = 0; 363 VM_MAP_ENTRY_FOREACH(entry, map) { 364 if (index >= pve->pve_entry && 365 (entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 366 break; 367 index++; 368 } 369 if (index < pve->pve_entry) { 370 error = EINVAL; 371 break; 372 } 373 if (entry == &map->header) { 374 error = ENOENT; 375 break; 376 } 377 378 /* We got an entry. */ 379 pve->pve_entry = index + 1; 380 pve->pve_timestamp = map->timestamp; 381 pve->pve_start = entry->start; 382 pve->pve_end = entry->end - 1; 383 pve->pve_offset = entry->offset; 384 pve->pve_prot = entry->protection; 385 386 /* Backing object's path needed? */ 387 if (pve->pve_pathlen == 0) 388 break; 389 390 pathlen = pve->pve_pathlen; 391 pve->pve_pathlen = 0; 392 393 obj = entry->object.vm_object; 394 if (obj != NULL) 395 VM_OBJECT_RLOCK(obj); 396 } while (0); 397 398 vm_map_unlock_read(map); 399 400 pve->pve_fsid = VNOVAL; 401 pve->pve_fileid = VNOVAL; 402 403 if (error == 0 && obj != NULL) { 404 lobj = obj; 405 for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) { 406 if (tobj != obj) 407 VM_OBJECT_RLOCK(tobj); 408 if (lobj != obj) 409 VM_OBJECT_RUNLOCK(lobj); 410 lobj = tobj; 411 pve->pve_offset += tobj->backing_object_offset; 412 } 413 vp = vm_object_vnode(lobj); 414 if (vp != NULL) 415 vref(vp); 416 if (lobj != obj) 417 VM_OBJECT_RUNLOCK(lobj); 418 VM_OBJECT_RUNLOCK(obj); 419 420 if (vp != NULL) { 421 freepath = NULL; 422 fullpath = NULL; 423 vn_fullpath(vp, &fullpath, &freepath); 424 vn_lock(vp, LK_SHARED | LK_RETRY); 425 if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) { 426 pve->pve_fileid = vattr.va_fileid; 427 pve->pve_fsid = vattr.va_fsid; 428 } 429 vput(vp); 430 431 if (fullpath != NULL) { 432 pve->pve_pathlen = strlen(fullpath) + 1; 433 if (pve->pve_pathlen <= pathlen) { 434 error = copyout(fullpath, pve->pve_path, 435 pve->pve_pathlen); 436 } else 437 error = ENAMETOOLONG; 438 } 439 if (freepath != NULL) 440 free(freepath, M_TEMP); 441 } 442 } 443 vmspace_free(vm); 444 if (error == 0) 445 CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p", 446 p->p_pid, pve->pve_entry, pve->pve_start); 447 448 return (error); 449 } 450 451 /* 452 * Process debugging system call. 453 */ 454 #ifndef _SYS_SYSPROTO_H_ 455 struct ptrace_args { 456 int req; 457 pid_t pid; 458 caddr_t addr; 459 int data; 460 }; 461 #endif 462 463 int 464 sys_ptrace(struct thread *td, struct ptrace_args *uap) 465 { 466 /* 467 * XXX this obfuscation is to reduce stack usage, but the register 468 * structs may be too large to put on the stack anyway. 469 */ 470 union { 471 struct ptrace_io_desc piod; 472 struct ptrace_lwpinfo pl; 473 struct ptrace_vm_entry pve; 474 struct ptrace_coredump pc; 475 struct dbreg dbreg; 476 struct fpreg fpreg; 477 struct reg reg; 478 char args[sizeof(td->td_sa.args)]; 479 struct ptrace_sc_ret psr; 480 int ptevents; 481 } r; 482 void *addr; 483 int error = 0; 484 485 AUDIT_ARG_PID(uap->pid); 486 AUDIT_ARG_CMD(uap->req); 487 AUDIT_ARG_VALUE(uap->data); 488 addr = &r; 489 switch (uap->req) { 490 case PT_GET_EVENT_MASK: 491 case PT_LWPINFO: 492 case PT_GET_SC_ARGS: 493 case PT_GET_SC_RET: 494 break; 495 case PT_GETREGS: 496 bzero(&r.reg, sizeof(r.reg)); 497 break; 498 case PT_GETFPREGS: 499 bzero(&r.fpreg, sizeof(r.fpreg)); 500 break; 501 case PT_GETDBREGS: 502 bzero(&r.dbreg, sizeof(r.dbreg)); 503 break; 504 case PT_SETREGS: 505 error = copyin(uap->addr, &r.reg, sizeof(r.reg)); 506 break; 507 case PT_SETFPREGS: 508 error = copyin(uap->addr, &r.fpreg, sizeof(r.fpreg)); 509 break; 510 case PT_SETDBREGS: 511 error = copyin(uap->addr, &r.dbreg, sizeof(r.dbreg)); 512 break; 513 case PT_SET_EVENT_MASK: 514 if (uap->data != sizeof(r.ptevents)) 515 error = EINVAL; 516 else 517 error = copyin(uap->addr, &r.ptevents, uap->data); 518 break; 519 case PT_IO: 520 error = copyin(uap->addr, &r.piod, sizeof(r.piod)); 521 break; 522 case PT_VM_ENTRY: 523 error = copyin(uap->addr, &r.pve, sizeof(r.pve)); 524 break; 525 case PT_COREDUMP: 526 if (uap->data != sizeof(r.pc)) 527 error = EINVAL; 528 else 529 error = copyin(uap->addr, &r.pc, uap->data); 530 break; 531 default: 532 addr = uap->addr; 533 break; 534 } 535 if (error) 536 return (error); 537 538 error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data); 539 if (error) 540 return (error); 541 542 switch (uap->req) { 543 case PT_VM_ENTRY: 544 error = copyout(&r.pve, uap->addr, sizeof(r.pve)); 545 break; 546 case PT_IO: 547 error = copyout(&r.piod, uap->addr, sizeof(r.piod)); 548 break; 549 case PT_GETREGS: 550 error = copyout(&r.reg, uap->addr, sizeof(r.reg)); 551 break; 552 case PT_GETFPREGS: 553 error = copyout(&r.fpreg, uap->addr, sizeof(r.fpreg)); 554 break; 555 case PT_GETDBREGS: 556 error = copyout(&r.dbreg, uap->addr, sizeof(r.dbreg)); 557 break; 558 case PT_GET_EVENT_MASK: 559 /* NB: The size in uap->data is validated in kern_ptrace(). */ 560 error = copyout(&r.ptevents, uap->addr, uap->data); 561 break; 562 case PT_LWPINFO: 563 /* NB: The size in uap->data is validated in kern_ptrace(). */ 564 error = copyout(&r.pl, uap->addr, uap->data); 565 break; 566 case PT_GET_SC_ARGS: 567 error = copyout(r.args, uap->addr, MIN(uap->data, 568 sizeof(r.args))); 569 break; 570 case PT_GET_SC_RET: 571 error = copyout(&r.psr, uap->addr, MIN(uap->data, 572 sizeof(r.psr))); 573 break; 574 } 575 576 return (error); 577 } 578 579 #ifdef COMPAT_FREEBSD32 580 /* 581 * PROC_READ(regs, td2, addr); 582 * becomes either: 583 * proc_read_regs(td2, addr); 584 * or 585 * proc_read_regs32(td2, addr); 586 * .. except this is done at runtime. There is an additional 587 * complication in that PROC_WRITE disallows 32 bit consumers 588 * from writing to 64 bit address space targets. 589 */ 590 #define PROC_READ(w, t, a) wrap32 ? \ 591 proc_read_ ## w ## 32(t, a) : \ 592 proc_read_ ## w (t, a) 593 #define PROC_WRITE(w, t, a) wrap32 ? \ 594 (safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \ 595 proc_write_ ## w (t, a) 596 #else 597 #define PROC_READ(w, t, a) proc_read_ ## w (t, a) 598 #define PROC_WRITE(w, t, a) proc_write_ ## w (t, a) 599 #endif 600 601 void 602 proc_set_traced(struct proc *p, bool stop) 603 { 604 605 sx_assert(&proctree_lock, SX_XLOCKED); 606 PROC_LOCK_ASSERT(p, MA_OWNED); 607 p->p_flag |= P_TRACED; 608 if (stop) 609 p->p_flag2 |= P2_PTRACE_FSTP; 610 p->p_ptevents = PTRACE_DEFAULT; 611 } 612 613 void 614 ptrace_unsuspend(struct proc *p) 615 { 616 PROC_LOCK_ASSERT(p, MA_OWNED); 617 618 PROC_SLOCK(p); 619 p->p_flag &= ~(P_STOPPED_TRACE | P_STOPPED_SIG | P_WAITED); 620 thread_unsuspend(p); 621 PROC_SUNLOCK(p); 622 itimer_proc_continue(p); 623 kqtimer_proc_continue(p); 624 } 625 626 static int 627 proc_can_ptrace(struct thread *td, struct proc *p) 628 { 629 int error; 630 631 PROC_LOCK_ASSERT(p, MA_OWNED); 632 633 if ((p->p_flag & P_WEXIT) != 0) 634 return (ESRCH); 635 636 if ((error = p_cansee(td, p)) != 0) 637 return (error); 638 if ((error = p_candebug(td, p)) != 0) 639 return (error); 640 641 /* not being traced... */ 642 if ((p->p_flag & P_TRACED) == 0) 643 return (EPERM); 644 645 /* not being traced by YOU */ 646 if (p->p_pptr != td->td_proc) 647 return (EBUSY); 648 649 /* not currently stopped */ 650 if ((p->p_flag & P_STOPPED_TRACE) == 0 || 651 p->p_suspcount != p->p_numthreads || 652 (p->p_flag & P_WAITED) == 0) 653 return (EBUSY); 654 655 return (0); 656 } 657 658 static struct thread * 659 ptrace_sel_coredump_thread(struct proc *p) 660 { 661 struct thread *td2; 662 663 PROC_LOCK_ASSERT(p, MA_OWNED); 664 MPASS((p->p_flag & P_STOPPED_TRACE) != 0); 665 666 FOREACH_THREAD_IN_PROC(p, td2) { 667 if ((td2->td_dbgflags & TDB_SSWITCH) != 0) 668 return (td2); 669 } 670 return (NULL); 671 } 672 673 int 674 kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data) 675 { 676 struct iovec iov; 677 struct uio uio; 678 struct proc *curp, *p, *pp; 679 struct thread *td2 = NULL, *td3; 680 struct ptrace_io_desc *piod = NULL; 681 struct ptrace_lwpinfo *pl; 682 struct ptrace_sc_ret *psr; 683 struct file *fp; 684 struct ptrace_coredump *pc; 685 struct thr_coredump_req *tcq; 686 int error, num, tmp; 687 lwpid_t tid = 0, *buf; 688 #ifdef COMPAT_FREEBSD32 689 int wrap32 = 0, safe = 0; 690 #endif 691 bool proctree_locked, p2_req_set; 692 693 curp = td->td_proc; 694 proctree_locked = false; 695 p2_req_set = false; 696 697 /* Lock proctree before locking the process. */ 698 switch (req) { 699 case PT_TRACE_ME: 700 case PT_ATTACH: 701 case PT_STEP: 702 case PT_CONTINUE: 703 case PT_TO_SCE: 704 case PT_TO_SCX: 705 case PT_SYSCALL: 706 case PT_FOLLOW_FORK: 707 case PT_LWP_EVENTS: 708 case PT_GET_EVENT_MASK: 709 case PT_SET_EVENT_MASK: 710 case PT_DETACH: 711 case PT_GET_SC_ARGS: 712 sx_xlock(&proctree_lock); 713 proctree_locked = true; 714 break; 715 default: 716 break; 717 } 718 719 if (req == PT_TRACE_ME) { 720 p = td->td_proc; 721 PROC_LOCK(p); 722 } else { 723 if (pid <= PID_MAX) { 724 if ((p = pfind(pid)) == NULL) { 725 if (proctree_locked) 726 sx_xunlock(&proctree_lock); 727 return (ESRCH); 728 } 729 } else { 730 td2 = tdfind(pid, -1); 731 if (td2 == NULL) { 732 if (proctree_locked) 733 sx_xunlock(&proctree_lock); 734 return (ESRCH); 735 } 736 p = td2->td_proc; 737 tid = pid; 738 pid = p->p_pid; 739 } 740 } 741 AUDIT_ARG_PROCESS(p); 742 743 if ((p->p_flag & P_WEXIT) != 0) { 744 error = ESRCH; 745 goto fail; 746 } 747 if ((error = p_cansee(td, p)) != 0) 748 goto fail; 749 750 if ((error = p_candebug(td, p)) != 0) 751 goto fail; 752 753 /* 754 * System processes can't be debugged. 755 */ 756 if ((p->p_flag & P_SYSTEM) != 0) { 757 error = EINVAL; 758 goto fail; 759 } 760 761 if (tid == 0) { 762 if ((p->p_flag & P_STOPPED_TRACE) != 0) { 763 KASSERT(p->p_xthread != NULL, ("NULL p_xthread")); 764 td2 = p->p_xthread; 765 } else { 766 td2 = FIRST_THREAD_IN_PROC(p); 767 } 768 tid = td2->td_tid; 769 } 770 771 #ifdef COMPAT_FREEBSD32 772 /* 773 * Test if we're a 32 bit client and what the target is. 774 * Set the wrap controls accordingly. 775 */ 776 if (SV_CURPROC_FLAG(SV_ILP32)) { 777 if (SV_PROC_FLAG(td2->td_proc, SV_ILP32)) 778 safe = 1; 779 wrap32 = 1; 780 } 781 #endif 782 /* 783 * Permissions check 784 */ 785 switch (req) { 786 case PT_TRACE_ME: 787 /* 788 * Always legal, when there is a parent process which 789 * could trace us. Otherwise, reject. 790 */ 791 if ((p->p_flag & P_TRACED) != 0) { 792 error = EBUSY; 793 goto fail; 794 } 795 if (p->p_pptr == initproc) { 796 error = EPERM; 797 goto fail; 798 } 799 break; 800 801 case PT_ATTACH: 802 /* Self */ 803 if (p == td->td_proc) { 804 error = EINVAL; 805 goto fail; 806 } 807 808 /* Already traced */ 809 if (p->p_flag & P_TRACED) { 810 error = EBUSY; 811 goto fail; 812 } 813 814 /* Can't trace an ancestor if you're being traced. */ 815 if (curp->p_flag & P_TRACED) { 816 for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) { 817 if (pp == p) { 818 error = EINVAL; 819 goto fail; 820 } 821 } 822 } 823 824 /* OK */ 825 break; 826 827 case PT_CLEARSTEP: 828 /* Allow thread to clear single step for itself */ 829 if (td->td_tid == tid) 830 break; 831 832 /* FALLTHROUGH */ 833 default: 834 /* 835 * Check for ptrace eligibility before waiting for 836 * holds to drain. 837 */ 838 error = proc_can_ptrace(td, p); 839 if (error != 0) 840 goto fail; 841 842 /* 843 * Block parallel ptrace requests. Most important, do 844 * not allow other thread in debugger to continue the 845 * debuggee until coredump finished. 846 */ 847 while ((p->p_flag2 & P2_PTRACEREQ) != 0) { 848 if (proctree_locked) 849 sx_xunlock(&proctree_lock); 850 error = msleep(&p->p_flag2, &p->p_mtx, PPAUSE | PCATCH | 851 (proctree_locked ? PDROP : 0), "pptrace", 0); 852 if (proctree_locked) { 853 sx_xlock(&proctree_lock); 854 PROC_LOCK(p); 855 } 856 if (error == 0 && td2->td_proc != p) 857 error = ESRCH; 858 if (error == 0) 859 error = proc_can_ptrace(td, p); 860 if (error != 0) 861 goto fail; 862 } 863 864 /* Ok */ 865 break; 866 } 867 868 /* 869 * Keep this process around and request parallel ptrace() 870 * request to wait until we finish this request. 871 */ 872 MPASS((p->p_flag2 & P2_PTRACEREQ) == 0); 873 p->p_flag2 |= P2_PTRACEREQ; 874 p2_req_set = true; 875 _PHOLD(p); 876 877 /* 878 * Actually do the requests 879 */ 880 881 td->td_retval[0] = 0; 882 883 switch (req) { 884 case PT_TRACE_ME: 885 /* set my trace flag and "owner" so it can read/write me */ 886 proc_set_traced(p, false); 887 if (p->p_flag & P_PPWAIT) 888 p->p_flag |= P_PPTRACE; 889 CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid); 890 break; 891 892 case PT_ATTACH: 893 /* security check done above */ 894 /* 895 * It would be nice if the tracing relationship was separate 896 * from the parent relationship but that would require 897 * another set of links in the proc struct or for "wait" 898 * to scan the entire proc table. To make life easier, 899 * we just re-parent the process we're trying to trace. 900 * The old parent is remembered so we can put things back 901 * on a "detach". 902 */ 903 proc_set_traced(p, true); 904 proc_reparent(p, td->td_proc, false); 905 CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid, 906 p->p_oppid); 907 908 sx_xunlock(&proctree_lock); 909 proctree_locked = false; 910 MPASS(p->p_xthread == NULL); 911 MPASS((p->p_flag & P_STOPPED_TRACE) == 0); 912 913 /* 914 * If already stopped due to a stop signal, clear the 915 * existing stop before triggering a traced SIGSTOP. 916 */ 917 if ((p->p_flag & P_STOPPED_SIG) != 0) { 918 PROC_SLOCK(p); 919 p->p_flag &= ~(P_STOPPED_SIG | P_WAITED); 920 thread_unsuspend(p); 921 PROC_SUNLOCK(p); 922 } 923 924 kern_psignal(p, SIGSTOP); 925 break; 926 927 case PT_CLEARSTEP: 928 CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid, 929 p->p_pid); 930 error = ptrace_clear_single_step(td2); 931 break; 932 933 case PT_SETSTEP: 934 CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid, 935 p->p_pid); 936 error = ptrace_single_step(td2); 937 break; 938 939 case PT_SUSPEND: 940 CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid, 941 p->p_pid); 942 td2->td_dbgflags |= TDB_SUSPEND; 943 thread_lock(td2); 944 td2->td_flags |= TDF_NEEDSUSPCHK; 945 thread_unlock(td2); 946 break; 947 948 case PT_RESUME: 949 CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid, 950 p->p_pid); 951 td2->td_dbgflags &= ~TDB_SUSPEND; 952 break; 953 954 case PT_FOLLOW_FORK: 955 CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid, 956 p->p_ptevents & PTRACE_FORK ? "enabled" : "disabled", 957 data ? "enabled" : "disabled"); 958 if (data) 959 p->p_ptevents |= PTRACE_FORK; 960 else 961 p->p_ptevents &= ~PTRACE_FORK; 962 break; 963 964 case PT_LWP_EVENTS: 965 CTR3(KTR_PTRACE, "PT_LWP_EVENTS: pid %d %s -> %s", p->p_pid, 966 p->p_ptevents & PTRACE_LWP ? "enabled" : "disabled", 967 data ? "enabled" : "disabled"); 968 if (data) 969 p->p_ptevents |= PTRACE_LWP; 970 else 971 p->p_ptevents &= ~PTRACE_LWP; 972 break; 973 974 case PT_GET_EVENT_MASK: 975 if (data != sizeof(p->p_ptevents)) { 976 error = EINVAL; 977 break; 978 } 979 CTR2(KTR_PTRACE, "PT_GET_EVENT_MASK: pid %d mask %#x", p->p_pid, 980 p->p_ptevents); 981 *(int *)addr = p->p_ptevents; 982 break; 983 984 case PT_SET_EVENT_MASK: 985 if (data != sizeof(p->p_ptevents)) { 986 error = EINVAL; 987 break; 988 } 989 tmp = *(int *)addr; 990 if ((tmp & ~(PTRACE_EXEC | PTRACE_SCE | PTRACE_SCX | 991 PTRACE_FORK | PTRACE_LWP | PTRACE_VFORK)) != 0) { 992 error = EINVAL; 993 break; 994 } 995 CTR3(KTR_PTRACE, "PT_SET_EVENT_MASK: pid %d mask %#x -> %#x", 996 p->p_pid, p->p_ptevents, tmp); 997 p->p_ptevents = tmp; 998 break; 999 1000 case PT_GET_SC_ARGS: 1001 CTR1(KTR_PTRACE, "PT_GET_SC_ARGS: pid %d", p->p_pid); 1002 if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) == 0 1003 #ifdef COMPAT_FREEBSD32 1004 || (wrap32 && !safe) 1005 #endif 1006 ) { 1007 error = EINVAL; 1008 break; 1009 } 1010 bzero(addr, sizeof(td2->td_sa.args)); 1011 bcopy(td2->td_sa.args, addr, td2->td_sa.callp->sy_narg * 1012 sizeof(register_t)); 1013 break; 1014 1015 case PT_GET_SC_RET: 1016 if ((td2->td_dbgflags & (TDB_SCX)) == 0 1017 #ifdef COMPAT_FREEBSD32 1018 || (wrap32 && !safe) 1019 #endif 1020 ) { 1021 error = EINVAL; 1022 break; 1023 } 1024 psr = addr; 1025 bzero(psr, sizeof(*psr)); 1026 psr->sr_error = td2->td_errno; 1027 if (psr->sr_error == 0) { 1028 psr->sr_retval[0] = td2->td_retval[0]; 1029 psr->sr_retval[1] = td2->td_retval[1]; 1030 } 1031 CTR4(KTR_PTRACE, 1032 "PT_GET_SC_RET: pid %d error %d retval %#lx,%#lx", 1033 p->p_pid, psr->sr_error, psr->sr_retval[0], 1034 psr->sr_retval[1]); 1035 break; 1036 1037 case PT_STEP: 1038 case PT_CONTINUE: 1039 case PT_TO_SCE: 1040 case PT_TO_SCX: 1041 case PT_SYSCALL: 1042 case PT_DETACH: 1043 /* Zero means do not send any signal */ 1044 if (data < 0 || data > _SIG_MAXSIG) { 1045 error = EINVAL; 1046 break; 1047 } 1048 1049 switch (req) { 1050 case PT_STEP: 1051 CTR3(KTR_PTRACE, "PT_STEP: tid %d (pid %d), sig = %d", 1052 td2->td_tid, p->p_pid, data); 1053 error = ptrace_single_step(td2); 1054 if (error) 1055 goto out; 1056 break; 1057 case PT_CONTINUE: 1058 case PT_TO_SCE: 1059 case PT_TO_SCX: 1060 case PT_SYSCALL: 1061 if (addr != (void *)1) { 1062 error = ptrace_set_pc(td2, 1063 (u_long)(uintfptr_t)addr); 1064 if (error) 1065 goto out; 1066 } 1067 switch (req) { 1068 case PT_TO_SCE: 1069 p->p_ptevents |= PTRACE_SCE; 1070 CTR4(KTR_PTRACE, 1071 "PT_TO_SCE: pid %d, events = %#x, PC = %#lx, sig = %d", 1072 p->p_pid, p->p_ptevents, 1073 (u_long)(uintfptr_t)addr, data); 1074 break; 1075 case PT_TO_SCX: 1076 p->p_ptevents |= PTRACE_SCX; 1077 CTR4(KTR_PTRACE, 1078 "PT_TO_SCX: pid %d, events = %#x, PC = %#lx, sig = %d", 1079 p->p_pid, p->p_ptevents, 1080 (u_long)(uintfptr_t)addr, data); 1081 break; 1082 case PT_SYSCALL: 1083 p->p_ptevents |= PTRACE_SYSCALL; 1084 CTR4(KTR_PTRACE, 1085 "PT_SYSCALL: pid %d, events = %#x, PC = %#lx, sig = %d", 1086 p->p_pid, p->p_ptevents, 1087 (u_long)(uintfptr_t)addr, data); 1088 break; 1089 case PT_CONTINUE: 1090 CTR3(KTR_PTRACE, 1091 "PT_CONTINUE: pid %d, PC = %#lx, sig = %d", 1092 p->p_pid, (u_long)(uintfptr_t)addr, data); 1093 break; 1094 } 1095 break; 1096 case PT_DETACH: 1097 /* 1098 * Clear P_TRACED before reparenting 1099 * a detached process back to its original 1100 * parent. Otherwise the debugee will be set 1101 * as an orphan of the debugger. 1102 */ 1103 p->p_flag &= ~(P_TRACED | P_WAITED); 1104 1105 /* 1106 * Reset the process parent. 1107 */ 1108 if (p->p_oppid != p->p_pptr->p_pid) { 1109 PROC_LOCK(p->p_pptr); 1110 sigqueue_take(p->p_ksi); 1111 PROC_UNLOCK(p->p_pptr); 1112 1113 pp = proc_realparent(p); 1114 proc_reparent(p, pp, false); 1115 if (pp == initproc) 1116 p->p_sigparent = SIGCHLD; 1117 CTR3(KTR_PTRACE, 1118 "PT_DETACH: pid %d reparented to pid %d, sig %d", 1119 p->p_pid, pp->p_pid, data); 1120 } else { 1121 CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d", 1122 p->p_pid, data); 1123 } 1124 1125 p->p_ptevents = 0; 1126 FOREACH_THREAD_IN_PROC(p, td3) { 1127 if ((td3->td_dbgflags & TDB_FSTP) != 0) { 1128 sigqueue_delete(&td3->td_sigqueue, 1129 SIGSTOP); 1130 } 1131 td3->td_dbgflags &= ~(TDB_XSIG | TDB_FSTP | 1132 TDB_SUSPEND); 1133 } 1134 1135 if ((p->p_flag2 & P2_PTRACE_FSTP) != 0) { 1136 sigqueue_delete(&p->p_sigqueue, SIGSTOP); 1137 p->p_flag2 &= ~P2_PTRACE_FSTP; 1138 } 1139 1140 /* should we send SIGCHLD? */ 1141 /* childproc_continued(p); */ 1142 break; 1143 } 1144 1145 sx_xunlock(&proctree_lock); 1146 proctree_locked = false; 1147 1148 sendsig: 1149 MPASS(!proctree_locked); 1150 1151 /* 1152 * Clear the pending event for the thread that just 1153 * reported its event (p_xthread). This may not be 1154 * the thread passed to PT_CONTINUE, PT_STEP, etc. if 1155 * the debugger is resuming a different thread. 1156 * 1157 * Deliver any pending signal via the reporting thread. 1158 */ 1159 MPASS(p->p_xthread != NULL); 1160 p->p_xthread->td_dbgflags &= ~TDB_XSIG; 1161 p->p_xthread->td_xsig = data; 1162 p->p_xthread = NULL; 1163 p->p_xsig = data; 1164 1165 /* 1166 * P_WKILLED is insurance that a PT_KILL/SIGKILL 1167 * always works immediately, even if another thread is 1168 * unsuspended first and attempts to handle a 1169 * different signal or if the POSIX.1b style signal 1170 * queue cannot accommodate any new signals. 1171 */ 1172 if (data == SIGKILL) 1173 proc_wkilled(p); 1174 1175 /* 1176 * Unsuspend all threads. To leave a thread 1177 * suspended, use PT_SUSPEND to suspend it before 1178 * continuing the process. 1179 */ 1180 ptrace_unsuspend(p); 1181 break; 1182 1183 case PT_WRITE_I: 1184 case PT_WRITE_D: 1185 td2->td_dbgflags |= TDB_USERWR; 1186 PROC_UNLOCK(p); 1187 error = 0; 1188 if (proc_writemem(td, p, (off_t)(uintptr_t)addr, &data, 1189 sizeof(int)) != sizeof(int)) 1190 error = ENOMEM; 1191 else 1192 CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x", 1193 p->p_pid, addr, data); 1194 PROC_LOCK(p); 1195 break; 1196 1197 case PT_READ_I: 1198 case PT_READ_D: 1199 PROC_UNLOCK(p); 1200 error = tmp = 0; 1201 if (proc_readmem(td, p, (off_t)(uintptr_t)addr, &tmp, 1202 sizeof(int)) != sizeof(int)) 1203 error = ENOMEM; 1204 else 1205 CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x", 1206 p->p_pid, addr, tmp); 1207 td->td_retval[0] = tmp; 1208 PROC_LOCK(p); 1209 break; 1210 1211 case PT_IO: 1212 piod = addr; 1213 iov.iov_base = piod->piod_addr; 1214 iov.iov_len = piod->piod_len; 1215 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs; 1216 uio.uio_resid = piod->piod_len; 1217 uio.uio_iov = &iov; 1218 uio.uio_iovcnt = 1; 1219 uio.uio_segflg = UIO_USERSPACE; 1220 uio.uio_td = td; 1221 switch (piod->piod_op) { 1222 case PIOD_READ_D: 1223 case PIOD_READ_I: 1224 CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)", 1225 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1226 uio.uio_rw = UIO_READ; 1227 break; 1228 case PIOD_WRITE_D: 1229 case PIOD_WRITE_I: 1230 CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)", 1231 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1232 td2->td_dbgflags |= TDB_USERWR; 1233 uio.uio_rw = UIO_WRITE; 1234 break; 1235 default: 1236 error = EINVAL; 1237 goto out; 1238 } 1239 PROC_UNLOCK(p); 1240 error = proc_rwmem(p, &uio); 1241 piod->piod_len -= uio.uio_resid; 1242 PROC_LOCK(p); 1243 break; 1244 1245 case PT_KILL: 1246 CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid); 1247 data = SIGKILL; 1248 goto sendsig; /* in PT_CONTINUE above */ 1249 1250 case PT_SETREGS: 1251 CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid, 1252 p->p_pid); 1253 td2->td_dbgflags |= TDB_USERWR; 1254 error = PROC_WRITE(regs, td2, addr); 1255 break; 1256 1257 case PT_GETREGS: 1258 CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid, 1259 p->p_pid); 1260 error = PROC_READ(regs, td2, addr); 1261 break; 1262 1263 case PT_SETFPREGS: 1264 CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid, 1265 p->p_pid); 1266 td2->td_dbgflags |= TDB_USERWR; 1267 error = PROC_WRITE(fpregs, td2, addr); 1268 break; 1269 1270 case PT_GETFPREGS: 1271 CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid, 1272 p->p_pid); 1273 error = PROC_READ(fpregs, td2, addr); 1274 break; 1275 1276 case PT_SETDBREGS: 1277 CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid, 1278 p->p_pid); 1279 td2->td_dbgflags |= TDB_USERWR; 1280 error = PROC_WRITE(dbregs, td2, addr); 1281 break; 1282 1283 case PT_GETDBREGS: 1284 CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid, 1285 p->p_pid); 1286 error = PROC_READ(dbregs, td2, addr); 1287 break; 1288 1289 case PT_LWPINFO: 1290 if (data <= 0 || data > sizeof(*pl)) { 1291 error = EINVAL; 1292 break; 1293 } 1294 pl = addr; 1295 bzero(pl, sizeof(*pl)); 1296 pl->pl_lwpid = td2->td_tid; 1297 pl->pl_event = PL_EVENT_NONE; 1298 pl->pl_flags = 0; 1299 if (td2->td_dbgflags & TDB_XSIG) { 1300 pl->pl_event = PL_EVENT_SIGNAL; 1301 if (td2->td_si.si_signo != 0 && 1302 data >= offsetof(struct ptrace_lwpinfo, pl_siginfo) 1303 + sizeof(pl->pl_siginfo)){ 1304 pl->pl_flags |= PL_FLAG_SI; 1305 pl->pl_siginfo = td2->td_si; 1306 } 1307 } 1308 if (td2->td_dbgflags & TDB_SCE) 1309 pl->pl_flags |= PL_FLAG_SCE; 1310 else if (td2->td_dbgflags & TDB_SCX) 1311 pl->pl_flags |= PL_FLAG_SCX; 1312 if (td2->td_dbgflags & TDB_EXEC) 1313 pl->pl_flags |= PL_FLAG_EXEC; 1314 if (td2->td_dbgflags & TDB_FORK) { 1315 pl->pl_flags |= PL_FLAG_FORKED; 1316 pl->pl_child_pid = td2->td_dbg_forked; 1317 if (td2->td_dbgflags & TDB_VFORK) 1318 pl->pl_flags |= PL_FLAG_VFORKED; 1319 } else if ((td2->td_dbgflags & (TDB_SCX | TDB_VFORK)) == 1320 TDB_VFORK) 1321 pl->pl_flags |= PL_FLAG_VFORK_DONE; 1322 if (td2->td_dbgflags & TDB_CHILD) 1323 pl->pl_flags |= PL_FLAG_CHILD; 1324 if (td2->td_dbgflags & TDB_BORN) 1325 pl->pl_flags |= PL_FLAG_BORN; 1326 if (td2->td_dbgflags & TDB_EXIT) 1327 pl->pl_flags |= PL_FLAG_EXITED; 1328 pl->pl_sigmask = td2->td_sigmask; 1329 pl->pl_siglist = td2->td_siglist; 1330 strcpy(pl->pl_tdname, td2->td_name); 1331 if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) { 1332 pl->pl_syscall_code = td2->td_sa.code; 1333 pl->pl_syscall_narg = td2->td_sa.callp->sy_narg; 1334 } else { 1335 pl->pl_syscall_code = 0; 1336 pl->pl_syscall_narg = 0; 1337 } 1338 CTR6(KTR_PTRACE, 1339 "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d", 1340 td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags, 1341 pl->pl_child_pid, pl->pl_syscall_code); 1342 break; 1343 1344 case PT_GETNUMLWPS: 1345 CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid, 1346 p->p_numthreads); 1347 td->td_retval[0] = p->p_numthreads; 1348 break; 1349 1350 case PT_GETLWPLIST: 1351 CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d", 1352 p->p_pid, data, p->p_numthreads); 1353 if (data <= 0) { 1354 error = EINVAL; 1355 break; 1356 } 1357 num = imin(p->p_numthreads, data); 1358 PROC_UNLOCK(p); 1359 buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK); 1360 tmp = 0; 1361 PROC_LOCK(p); 1362 FOREACH_THREAD_IN_PROC(p, td2) { 1363 if (tmp >= num) 1364 break; 1365 buf[tmp++] = td2->td_tid; 1366 } 1367 PROC_UNLOCK(p); 1368 error = copyout(buf, addr, tmp * sizeof(lwpid_t)); 1369 free(buf, M_TEMP); 1370 if (!error) 1371 td->td_retval[0] = tmp; 1372 PROC_LOCK(p); 1373 break; 1374 1375 case PT_VM_TIMESTAMP: 1376 CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d", 1377 p->p_pid, p->p_vmspace->vm_map.timestamp); 1378 td->td_retval[0] = p->p_vmspace->vm_map.timestamp; 1379 break; 1380 1381 case PT_VM_ENTRY: 1382 PROC_UNLOCK(p); 1383 error = ptrace_vm_entry(td, p, addr); 1384 PROC_LOCK(p); 1385 break; 1386 1387 case PT_COREDUMP: 1388 pc = addr; 1389 CTR2(KTR_PTRACE, "PT_COREDUMP: pid %d, fd %d", 1390 p->p_pid, pc->pc_fd); 1391 1392 if ((pc->pc_flags & ~(PC_COMPRESS | PC_ALL)) != 0) { 1393 error = EINVAL; 1394 break; 1395 } 1396 PROC_UNLOCK(p); 1397 1398 tcq = malloc(sizeof(*tcq), M_TEMP, M_WAITOK | M_ZERO); 1399 fp = NULL; 1400 error = fget_write(td, pc->pc_fd, &cap_write_rights, &fp); 1401 if (error != 0) 1402 goto coredump_cleanup_nofp; 1403 if (fp->f_type != DTYPE_VNODE || fp->f_vnode->v_type != VREG) { 1404 error = EPIPE; 1405 goto coredump_cleanup; 1406 } 1407 1408 PROC_LOCK(p); 1409 error = proc_can_ptrace(td, p); 1410 if (error != 0) 1411 goto coredump_cleanup_locked; 1412 1413 td2 = ptrace_sel_coredump_thread(p); 1414 if (td2 == NULL) { 1415 error = EBUSY; 1416 goto coredump_cleanup_locked; 1417 } 1418 KASSERT((td2->td_dbgflags & TDB_COREDUMPRQ) == 0, 1419 ("proc %d tid %d req coredump", p->p_pid, td2->td_tid)); 1420 1421 tcq->tc_vp = fp->f_vnode; 1422 tcq->tc_limit = pc->pc_limit == 0 ? OFF_MAX : pc->pc_limit; 1423 tcq->tc_flags = SVC_PT_COREDUMP; 1424 if ((pc->pc_flags & PC_COMPRESS) == 0) 1425 tcq->tc_flags |= SVC_NOCOMPRESS; 1426 if ((pc->pc_flags & PC_ALL) != 0) 1427 tcq->tc_flags |= SVC_ALL; 1428 td2->td_coredump = tcq; 1429 td2->td_dbgflags |= TDB_COREDUMPRQ; 1430 thread_run_flash(td2); 1431 while ((td2->td_dbgflags & TDB_COREDUMPRQ) != 0) 1432 msleep(p, &p->p_mtx, PPAUSE, "crdmp", 0); 1433 error = tcq->tc_error; 1434 coredump_cleanup_locked: 1435 PROC_UNLOCK(p); 1436 coredump_cleanup: 1437 fdrop(fp, td); 1438 coredump_cleanup_nofp: 1439 free(tcq, M_TEMP); 1440 PROC_LOCK(p); 1441 break; 1442 1443 default: 1444 #ifdef __HAVE_PTRACE_MACHDEP 1445 if (req >= PT_FIRSTMACH) { 1446 PROC_UNLOCK(p); 1447 error = cpu_ptrace(td2, req, addr, data); 1448 PROC_LOCK(p); 1449 } else 1450 #endif 1451 /* Unknown request. */ 1452 error = EINVAL; 1453 break; 1454 } 1455 out: 1456 /* Drop our hold on this process now that the request has completed. */ 1457 _PRELE(p); 1458 fail: 1459 if (p2_req_set) { 1460 if ((p->p_flag2 & P2_PTRACEREQ) != 0) 1461 wakeup(&p->p_flag2); 1462 p->p_flag2 &= ~P2_PTRACEREQ; 1463 } 1464 PROC_UNLOCK(p); 1465 if (proctree_locked) 1466 sx_xunlock(&proctree_lock); 1467 return (error); 1468 } 1469 #undef PROC_READ 1470 #undef PROC_WRITE 1471