/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/reg.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/caprights.h>
#include <sys/filedesc.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#endif

/* Assert it's safe to unlock a process, e.g. to allocate working memory */
#define	PROC_ASSERT_TRACEREQ(p)	MPASS(((p)->p_flag2 & P2_PTRACEREQ) != 0)

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */
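/*
 * For orientation only: the helpers below back the classic ptrace(2)
 * register requests.  A minimal, hedged userland sketch of the round
 * trip they serve (pid names an already-attached, stopped tracee):
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <machine/reg.h>
 *	#include <err.h>
 *
 *	struct reg r;
 *
 *	if (ptrace(PT_GETREGS, pid, (caddr_t)&r, 0) == -1)
 *		err(1, "PT_GETREGS");
 *	// ... inspect or modify r ...
 *	if (ptrace(PT_SETREGS, pid, (caddr_t)&r, 0) == -1)
 *		err(1, "PT_SETREGS");
 */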
#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while (0)

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

static struct regset *
proc_find_regset(struct thread *td, int note)
{
	struct regset **regsetp, **regset_end, *regset;
	struct sysentvec *sv;

	sv = td->td_proc->p_sysent;
	regsetp = sv->sv_regset_begin;
	if (regsetp == NULL)
		return (NULL);
	regset_end = sv->sv_regset_end;
	MPASS(regset_end != NULL);
	for (; regsetp < regset_end; regsetp++) {
		regset = *regsetp;
		if (regset->note != note)
			continue;

		return (regset);
	}

	return (NULL);
}

static int
proc_read_regset(struct thread *td, int note, struct iovec *iov)
{
	struct regset *regset;
	struct proc *p;
	void *buf;
	size_t size;
	int error;

	regset = proc_find_regset(td, note);
	if (regset == NULL)
		return (EINVAL);

	if (iov->iov_base == NULL) {
		iov->iov_len = regset->size;
		if (iov->iov_len == 0)
			return (EINVAL);

		return (0);
	}

	/* The length is wrong, return an error */
	if (iov->iov_len != regset->size)
		return (EINVAL);

	if (regset->get == NULL)
		return (EINVAL);

	error = 0;
	size = regset->size;
	p = td->td_proc;

	/* Drop the proc lock while allocating the temp buffer */
	PROC_ASSERT_TRACEREQ(p);
	PROC_UNLOCK(p);
	buf = malloc(size, M_TEMP, M_WAITOK);
	PROC_LOCK(p);

	if (!regset->get(regset, td, buf, &size)) {
		error = EINVAL;
	} else {
		KASSERT(size == regset->size,
		    ("%s: Getter function changed the size", __func__));

		iov->iov_len = size;
		PROC_UNLOCK(p);
		error = copyout(buf, iov->iov_base, size);
		PROC_LOCK(p);
	}

	free(buf, M_TEMP);

	return (error);
}
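/*
 * A hedged userland sketch of the size-probe protocol proc_read_regset()
 * implements for PT_GETREGSET: a NULL iov_base asks for the regset size,
 * and a second call with a buffer of exactly that size fetches the data.
 * NT_PRSTATUS is just an example note value; where the note constants
 * live is an assumption of this sketch.
 *
 *	struct iovec iov;
 *
 *	iov.iov_base = NULL;
 *	iov.iov_len = 0;
 *	ptrace(PT_GETREGSET, pid, (caddr_t)&iov, NT_PRSTATUS);
 *	iov.iov_base = malloc(iov.iov_len);
 *	ptrace(PT_GETREGSET, pid, (caddr_t)&iov, NT_PRSTATUS);
 */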
static int
proc_write_regset(struct thread *td, int note, struct iovec *iov)
{
	struct regset *regset;
	struct proc *p;
	void *buf;
	size_t size;
	int error;

	regset = proc_find_regset(td, note);
	if (regset == NULL)
		return (EINVAL);

	/* The length is wrong, return an error */
	if (iov->iov_len != regset->size)
		return (EINVAL);

	if (regset->set == NULL)
		return (EINVAL);

	size = regset->size;
	p = td->td_proc;

	/* Drop the proc lock while allocating the temp buffer */
	PROC_ASSERT_TRACEREQ(p);
	PROC_UNLOCK(p);
	buf = malloc(size, M_TEMP, M_WAITOK);
	error = copyin(iov->iov_base, buf, size);
	PROC_LOCK(p);

	if (error == 0) {
		if (!regset->set(regset, td, buf, size)) {
			error = EINVAL;
		}
	}

	free(buf, M_TEMP);

	return (error);
}

#ifdef COMPAT_FREEBSD32
/* For 32-bit binaries, we need to expose the 32-bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, page_offset, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	PROC_ASSERT_HELD(p);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	/*
	 * If we are writing, then we request vm_fault() to create a private
	 * copy of each page.  Since these copies will not be writeable by the
	 * process, we must explicitly request that they be dirtied.
	 */
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		u_int len;
		vm_page_t m;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault and hold the page on behalf of the process.
		 */
		error = vm_fault(map, pageno, reqprot, fault_flags, &m);
		if (error != KERN_SUCCESS) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (writing && error == 0) {
			vm_map_lock_read(map);
			if (vm_map_check_protection(map, pageno, pageno +
			    PAGE_SIZE, VM_PROT_EXECUTE))
				vm_sync_icache(map, uva, len);
			vm_map_unlock_read(map);
		}

		/*
		 * Release the page.
		 */
		vm_page_unwire(m, PQ_ACTIVE);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
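/*
 * proc_rwmem() is what ultimately services PT_IO.  A hedged sketch of
 * the userland side, reading len bytes from remote_addr in the tracee
 * (pid, remote_addr, local_buf and len are this example's assumptions):
 *
 *	struct ptrace_io_desc piod;
 *
 *	piod.piod_op = PIOD_READ_D;
 *	piod.piod_offs = (void *)remote_addr;
 *	piod.piod_addr = local_buf;
 *	piod.piod_len = len;
 *	if (ptrace(PT_IO, pid, (caddr_t)&piod, 0) == -1)
 *		err(1, "PT_IO");
 *	// piod.piod_len now holds the byte count actually transferred.
 */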
static ssize_t
proc_iop(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len, enum uio_rw rw)
{
	struct iovec iov;
	struct uio uio;
	ssize_t slen;

	MPASS(len < SSIZE_MAX);
	slen = (ssize_t)len;

	iov.iov_base = (caddr_t)buf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = va;
	uio.uio_resid = slen;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = rw;
	uio.uio_td = td;
	proc_rwmem(p, &uio);
	if (uio.uio_resid == slen)
		return (-1);
	return (slen - uio.uio_resid);
}

ssize_t
proc_readmem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len)
{

	return (proc_iop(td, p, va, buf, len, UIO_READ));
}

ssize_t
proc_writemem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
    size_t len)
{

	return (proc_iop(td, p, va, buf, len, UIO_WRITE));
}
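/*
 * Usage note (illustrative): proc_readmem() and proc_writemem() return
 * the number of bytes actually transferred, which may be short, or -1
 * when nothing at all could be copied, so callers check the count rather
 * than a zero/nonzero result.  A minimal kernel-side sketch, mirroring
 * the PT_READ_D handler in kern_ptrace() below:
 *
 *	int word;
 *
 *	if (proc_readmem(td, p, va, &word, sizeof(word)) != sizeof(word))
 *		error = ENOMEM;
 */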
static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		KASSERT((map->header.eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
		    ("Submap in map header"));
		index = 0;
		VM_MAP_ENTRY_FOREACH(entry, map) {
			if (index >= pve->pve_entry &&
			    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
				break;
			index++;
		}
		if (index < pve->pve_entry) {
			error = EINVAL;
			break;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_RLOCK(obj);
	} while (0);

	vm_map_unlock_read(map);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_RLOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = vm_object_vnode(lobj);
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_RUNLOCK(lobj);
		VM_OBJECT_RUNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			vn_fullpath(vp, &fullpath, &freepath);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}
	vmspace_free(vm);
	if (error == 0)
		CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p",
		    p->p_pid, pve->pve_entry, pve->pve_start);

	return (error);
}
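/*
 * A hedged userland sketch of walking a tracee's address space with
 * PT_VM_ENTRY: pve_entry is the iteration cursor maintained by the
 * kernel, and the call fails with ENOENT once the map is exhausted.
 * Setting pve_pathlen to 0 would skip the backing-path lookup.
 *
 *	struct ptrace_vm_entry pve;
 *	char path[PATH_MAX];
 *
 *	memset(&pve, 0, sizeof(pve));
 *	for (;;) {
 *		pve.pve_path = path;
 *		pve.pve_pathlen = sizeof(path);
 *		if (ptrace(PT_VM_ENTRY, pid, (caddr_t)&pve, 0) == -1)
 *			break;	// errno == ENOENT at end of map
 *		printf("%#jx-%#jx %s\n", (uintmax_t)pve.pve_start,
 *		    (uintmax_t)pve.pve_end, pve.pve_pathlen ? path : "");
 *	}
 */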
/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct ptrace_coredump pc;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
		struct iovec vec;
		char args[sizeof(td->td_sa.args)];
		struct ptrace_sc_ret psr;
		int ptevents;
	} r;
	void *addr;
	int error;

	if (!allow_ptrace)
		return (ENOSYS);
	error = 0;

	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GET_EVENT_MASK:
	case PT_LWPINFO:
	case PT_GET_SC_ARGS:
	case PT_GET_SC_RET:
		break;
	case PT_GETREGS:
		bzero(&r.reg, sizeof(r.reg));
		break;
	case PT_GETFPREGS:
		bzero(&r.fpreg, sizeof(r.fpreg));
		break;
	case PT_GETDBREGS:
		bzero(&r.dbreg, sizeof(r.dbreg));
		break;
	case PT_GETREGSET:
	case PT_SETREGSET:
		error = copyin(uap->addr, &r.vec, sizeof(r.vec));
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof(r.reg));
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof(r.fpreg));
		break;
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof(r.dbreg));
		break;
	case PT_SET_EVENT_MASK:
		if (uap->data != sizeof(r.ptevents))
			error = EINVAL;
		else
			error = copyin(uap->addr, &r.ptevents, uap->data);
		break;
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof(r.piod));
		break;
	case PT_VM_ENTRY:
		error = copyin(uap->addr, &r.pve, sizeof(r.pve));
		break;
	case PT_COREDUMP:
		if (uap->data != sizeof(r.pc))
			error = EINVAL;
		else
			error = copyin(uap->addr, &r.pc, uap->data);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = copyout(&r.pve, uap->addr, sizeof(r.pve));
		break;
	case PT_IO:
		error = copyout(&r.piod, uap->addr, sizeof(r.piod));
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof(r.reg));
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof(r.fpreg));
		break;
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof(r.dbreg));
		break;
	case PT_GETREGSET:
		error = copyout(&r.vec, uap->addr, sizeof(r.vec));
		break;
	case PT_GET_EVENT_MASK:
		/* NB: The size in uap->data is validated in kern_ptrace(). */
		error = copyout(&r.ptevents, uap->addr, uap->data);
		break;
	case PT_LWPINFO:
		/* NB: The size in uap->data is validated in kern_ptrace(). */
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	case PT_GET_SC_ARGS:
		error = copyout(r.args, uap->addr, MIN(uap->data,
		    sizeof(r.args)));
		break;
	case PT_GET_SC_RET:
		error = copyout(&r.psr, uap->addr, MIN(uap->data,
		    sizeof(r.psr)));
		break;
	}

	return (error);
}
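/*
 * For orientation, a minimal (and hedged) userland attach cycle that
 * exercises the copyin/copyout plumbing above; error handling elided:
 *
 *	ptrace(PT_ATTACH, pid, NULL, 0);	// tracee is sent SIGSTOP
 *	waitpid(pid, &status, 0);		// wait for the stop to report
 *	// ... PT_GETREGS, PT_IO, etc. while the tracee is stopped ...
 *	ptrace(PT_DETACH, pid, NULL, 0);	// resume and drop tracing
 */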
#ifdef COMPAT_FREEBSD32
/*
 * PROC_READ(regs, td2, addr);
 * becomes either:
 *	proc_read_regs(td2, addr);
 * or
 *	proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32-bit consumers
 * from writing to 64-bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif
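/*
 * Expansion example (illustrative): under COMPAT_FREEBSD32 with a 32-bit
 * debugger (wrap32 != 0),
 *
 *	error = PROC_READ(regs, td2, addr);
 *
 * becomes
 *
 *	error = wrap32 ? proc_read_regs32(td2, addr) :
 *	    proc_read_regs(td2, addr);
 *
 * and PROC_WRITE additionally evaluates to EINVAL when the target is
 * 64-bit (safe == 0), so a 32-bit debugger cannot write a truncated
 * register file into a 64-bit target.
 */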
void
proc_set_traced(struct proc *p, bool stop)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag |= P_TRACED;
	if (stop)
		p->p_flag2 |= P2_PTRACE_FSTP;
	p->p_ptevents = PTRACE_DEFAULT;
}

void
ptrace_unsuspend(struct proc *p)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	PROC_SLOCK(p);
	p->p_flag &= ~(P_STOPPED_TRACE | P_STOPPED_SIG | P_WAITED);
	thread_unsuspend(p);
	PROC_SUNLOCK(p);
	itimer_proc_continue(p);
	kqtimer_proc_continue(p);
}

static int
proc_can_ptrace(struct thread *td, struct proc *p)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_WEXIT) != 0)
		return (ESRCH);

	if ((error = p_cansee(td, p)) != 0)
		return (error);
	if ((error = p_candebug(td, p)) != 0)
		return (error);

	/* not being traced... */
	if ((p->p_flag & P_TRACED) == 0)
		return (EPERM);

	/* not being traced by YOU */
	if (p->p_pptr != td->td_proc)
		return (EBUSY);

	/* not currently stopped */
	if ((p->p_flag & P_STOPPED_TRACE) == 0 ||
	    p->p_suspcount != p->p_numthreads ||
	    (p->p_flag & P_WAITED) == 0)
		return (EBUSY);

	return (0);
}

static struct thread *
ptrace_sel_coredump_thread(struct proc *p)
{
	struct thread *td2;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	MPASS((p->p_flag & P_STOPPED_TRACE) != 0);

	FOREACH_THREAD_IN_PROC(p, td2) {
		if ((td2->td_dbgflags & TDB_SSWITCH) != 0)
			return (td2);
	}
	return (NULL);
}

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL, *td3;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	struct ptrace_sc_ret *psr;
	struct file *fp;
	struct ptrace_coredump *pc;
	struct thr_coredump_req *tcq;
	int error, num, tmp;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
#endif
	bool proctree_locked, p2_req_set;

	curp = td->td_proc;
	proctree_locked = false;
	p2_req_set = false;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_FOLLOW_FORK:
	case PT_LWP_EVENTS:
	case PT_GET_EVENT_MASK:
	case PT_SET_EVENT_MASK:
	case PT_DETACH:
	case PT_GET_SC_ARGS:
		sx_xlock(&proctree_lock);
		proctree_locked = true;
		break;
	default:
		break;
	}

	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			td2 = tdfind(pid, -1);
			if (td2 == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			p = td2->td_proc;
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test whether we're a 32-bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/*
		 * Always legal, when there is a parent process which
		 * could trace us.  Otherwise, reject.
		 */
		if ((p->p_flag & P_TRACED) != 0) {
			error = EBUSY;
			goto fail;
		}
		if (p->p_pptr == initproc) {
			error = EPERM;
			goto fail;
		}
		break;

	case PT_ATTACH:
		/* Self */
		if (p == td->td_proc) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/*
		 * Check for ptrace eligibility before waiting for
		 * holds to drain.
		 */
		error = proc_can_ptrace(td, p);
		if (error != 0)
			goto fail;

		/*
		 * Block parallel ptrace requests.  Most importantly, do
		 * not allow another thread in the debugger to continue the
		 * debuggee until the coredump is finished.
		 */
		while ((p->p_flag2 & P2_PTRACEREQ) != 0) {
			if (proctree_locked)
				sx_xunlock(&proctree_lock);
			error = msleep(&p->p_flag2, &p->p_mtx, PPAUSE | PCATCH |
			    (proctree_locked ? PDROP : 0), "pptrace", 0);
			if (proctree_locked) {
				sx_xlock(&proctree_lock);
				PROC_LOCK(p);
			}
			if (error == 0 && td2->td_proc != p)
				error = ESRCH;
			if (error == 0)
				error = proc_can_ptrace(td, p);
			if (error != 0)
				goto fail;
		}

		/* Ok */
		break;
	}

	/*
	 * Keep this process around and request parallel ptrace()
	 * request to wait until we finish this request.
	 */
	MPASS((p->p_flag2 & P2_PTRACEREQ) == 0);
	p->p_flag2 |= P2_PTRACEREQ;
	p2_req_set = true;
	_PHOLD(p);

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		proc_set_traced(p, false);
		if (p->p_flag & P_PPWAIT)
			p->p_flag |= P_PPTRACE;
		CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid);
		break;

	case PT_ATTACH:
		/* security check done above */
		/*
		 * It would be nice if the tracing relationship was separate
		 * from the parent relationship but that would require
		 * another set of links in the proc struct or for "wait"
		 * to scan the entire proc table.  To make life easier,
		 * we just re-parent the process we're trying to trace.
		 * The old parent is remembered so we can put things back
		 * on a "detach".
		 */
		proc_set_traced(p, true);
		proc_reparent(p, td->td_proc, false);
		CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid,
		    p->p_oppid);

		sx_xunlock(&proctree_lock);
		proctree_locked = false;
		MPASS(p->p_xthread == NULL);
		MPASS((p->p_flag & P_STOPPED_TRACE) == 0);

		/*
		 * If already stopped due to a stop signal, clear the
		 * existing stop before triggering a traced SIGSTOP.
		 */
		if ((p->p_flag & P_STOPPED_SIG) != 0) {
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_SIG | P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		}

		kern_psignal(p, SIGSTOP);
		break;

	case PT_CLEARSTEP:
		CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid,
		    p->p_pid);
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_FOLLOW_FORK:
		CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid,
		    p->p_ptevents & PTRACE_FORK ? "enabled" : "disabled",
		    data ? "enabled" : "disabled");
		if (data)
			p->p_ptevents |= PTRACE_FORK;
		else
			p->p_ptevents &= ~PTRACE_FORK;
		break;
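	/*
	 * A hedged sketch of the debugger side of fork following: enable
	 * the event, resume, and on the next stop ask PT_LWPINFO whether
	 * a fork was reported and which child to pick up.
	 *
	 *	ptrace(PT_FOLLOW_FORK, pid, NULL, 1);
	 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);
	 *	waitpid(pid, &status, 0);
	 *	ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl));
	 *	if (pl.pl_flags & PL_FLAG_FORKED)
	 *		child = pl.pl_child_pid;  // child stops as if attached
	 */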
"enabled" : "disabled"); 1101 if (data) 1102 p->p_ptevents |= PTRACE_LWP; 1103 else 1104 p->p_ptevents &= ~PTRACE_LWP; 1105 break; 1106 1107 case PT_GET_EVENT_MASK: 1108 if (data != sizeof(p->p_ptevents)) { 1109 error = EINVAL; 1110 break; 1111 } 1112 CTR2(KTR_PTRACE, "PT_GET_EVENT_MASK: pid %d mask %#x", p->p_pid, 1113 p->p_ptevents); 1114 *(int *)addr = p->p_ptevents; 1115 break; 1116 1117 case PT_SET_EVENT_MASK: 1118 if (data != sizeof(p->p_ptevents)) { 1119 error = EINVAL; 1120 break; 1121 } 1122 tmp = *(int *)addr; 1123 if ((tmp & ~(PTRACE_EXEC | PTRACE_SCE | PTRACE_SCX | 1124 PTRACE_FORK | PTRACE_LWP | PTRACE_VFORK)) != 0) { 1125 error = EINVAL; 1126 break; 1127 } 1128 CTR3(KTR_PTRACE, "PT_SET_EVENT_MASK: pid %d mask %#x -> %#x", 1129 p->p_pid, p->p_ptevents, tmp); 1130 p->p_ptevents = tmp; 1131 break; 1132 1133 case PT_GET_SC_ARGS: 1134 CTR1(KTR_PTRACE, "PT_GET_SC_ARGS: pid %d", p->p_pid); 1135 if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) == 0 1136 #ifdef COMPAT_FREEBSD32 1137 || (wrap32 && !safe) 1138 #endif 1139 ) { 1140 error = EINVAL; 1141 break; 1142 } 1143 bzero(addr, sizeof(td2->td_sa.args)); 1144 /* See the explanation in linux_ptrace_get_syscall_info(). */ 1145 bcopy(td2->td_sa.args, addr, SV_PROC_ABI(td->td_proc) == 1146 SV_ABI_LINUX ? sizeof(td2->td_sa.args) : 1147 td2->td_sa.callp->sy_narg * sizeof(register_t)); 1148 break; 1149 1150 case PT_GET_SC_RET: 1151 if ((td2->td_dbgflags & (TDB_SCX)) == 0 1152 #ifdef COMPAT_FREEBSD32 1153 || (wrap32 && !safe) 1154 #endif 1155 ) { 1156 error = EINVAL; 1157 break; 1158 } 1159 psr = addr; 1160 bzero(psr, sizeof(*psr)); 1161 psr->sr_error = td2->td_errno; 1162 if (psr->sr_error == 0) { 1163 psr->sr_retval[0] = td2->td_retval[0]; 1164 psr->sr_retval[1] = td2->td_retval[1]; 1165 } 1166 CTR4(KTR_PTRACE, 1167 "PT_GET_SC_RET: pid %d error %d retval %#lx,%#lx", 1168 p->p_pid, psr->sr_error, psr->sr_retval[0], 1169 psr->sr_retval[1]); 1170 break; 1171 1172 case PT_STEP: 1173 case PT_CONTINUE: 1174 case PT_TO_SCE: 1175 case PT_TO_SCX: 1176 case PT_SYSCALL: 1177 case PT_DETACH: 1178 /* Zero means do not send any signal */ 1179 if (data < 0 || data > _SIG_MAXSIG) { 1180 error = EINVAL; 1181 break; 1182 } 1183 1184 switch (req) { 1185 case PT_STEP: 1186 CTR3(KTR_PTRACE, "PT_STEP: tid %d (pid %d), sig = %d", 1187 td2->td_tid, p->p_pid, data); 1188 error = ptrace_single_step(td2); 1189 if (error) 1190 goto out; 1191 break; 1192 case PT_CONTINUE: 1193 case PT_TO_SCE: 1194 case PT_TO_SCX: 1195 case PT_SYSCALL: 1196 if (addr != (void *)1) { 1197 error = ptrace_set_pc(td2, 1198 (u_long)(uintfptr_t)addr); 1199 if (error) 1200 goto out; 1201 } 1202 switch (req) { 1203 case PT_TO_SCE: 1204 p->p_ptevents |= PTRACE_SCE; 1205 CTR4(KTR_PTRACE, 1206 "PT_TO_SCE: pid %d, events = %#x, PC = %#lx, sig = %d", 1207 p->p_pid, p->p_ptevents, 1208 (u_long)(uintfptr_t)addr, data); 1209 break; 1210 case PT_TO_SCX: 1211 p->p_ptevents |= PTRACE_SCX; 1212 CTR4(KTR_PTRACE, 1213 "PT_TO_SCX: pid %d, events = %#x, PC = %#lx, sig = %d", 1214 p->p_pid, p->p_ptevents, 1215 (u_long)(uintfptr_t)addr, data); 1216 break; 1217 case PT_SYSCALL: 1218 p->p_ptevents |= PTRACE_SYSCALL; 1219 CTR4(KTR_PTRACE, 1220 "PT_SYSCALL: pid %d, events = %#x, PC = %#lx, sig = %d", 1221 p->p_pid, p->p_ptevents, 1222 (u_long)(uintfptr_t)addr, data); 1223 break; 1224 case PT_CONTINUE: 1225 CTR3(KTR_PTRACE, 1226 "PT_CONTINUE: pid %d, PC = %#lx, sig = %d", 1227 p->p_pid, (u_long)(uintfptr_t)addr, data); 1228 break; 1229 } 1230 break; 1231 case PT_DETACH: 1232 /* 1233 * Clear P_TRACED before reparenting 
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			CTR3(KTR_PTRACE, "PT_STEP: tid %d (pid %d), sig = %d",
			    td2->td_tid, p->p_pid, data);
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_ptevents |= PTRACE_SCE;
				CTR4(KTR_PTRACE,
				    "PT_TO_SCE: pid %d, events = %#x, PC = %#lx, sig = %d",
				    p->p_pid, p->p_ptevents,
				    (u_long)(uintfptr_t)addr, data);
				break;
			case PT_TO_SCX:
				p->p_ptevents |= PTRACE_SCX;
				CTR4(KTR_PTRACE,
				    "PT_TO_SCX: pid %d, events = %#x, PC = %#lx, sig = %d",
				    p->p_pid, p->p_ptevents,
				    (u_long)(uintfptr_t)addr, data);
				break;
			case PT_SYSCALL:
				p->p_ptevents |= PTRACE_SYSCALL;
				CTR4(KTR_PTRACE,
				    "PT_SYSCALL: pid %d, events = %#x, PC = %#lx, sig = %d",
				    p->p_pid, p->p_ptevents,
				    (u_long)(uintfptr_t)addr, data);
				break;
			case PT_CONTINUE:
				CTR3(KTR_PTRACE,
				    "PT_CONTINUE: pid %d, PC = %#lx, sig = %d",
				    p->p_pid, (u_long)(uintfptr_t)addr, data);
				break;
			}
			break;
		case PT_DETACH:
			/*
			 * Clear P_TRACED before reparenting
			 * a detached process back to its original
			 * parent.  Otherwise the debuggee will be set
			 * as an orphan of the debugger.
			 */
			p->p_flag &= ~(P_TRACED | P_WAITED);

			/*
			 * Reset the process parent.
			 */
			if (p->p_oppid != p->p_pptr->p_pid) {
				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				pp = proc_realparent(p);
				proc_reparent(p, pp, false);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
				CTR3(KTR_PTRACE,
				    "PT_DETACH: pid %d reparented to pid %d, sig %d",
				    p->p_pid, pp->p_pid, data);
			} else {
				CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d",
				    p->p_pid, data);
			}

			p->p_ptevents = 0;
			FOREACH_THREAD_IN_PROC(p, td3) {
				if ((td3->td_dbgflags & TDB_FSTP) != 0) {
					sigqueue_delete(&td3->td_sigqueue,
					    SIGSTOP);
				}
				td3->td_dbgflags &= ~(TDB_XSIG | TDB_FSTP |
				    TDB_SUSPEND);
			}

			if ((p->p_flag2 & P2_PTRACE_FSTP) != 0) {
				sigqueue_delete(&p->p_sigqueue, SIGSTOP);
				p->p_flag2 &= ~P2_PTRACE_FSTP;
			}

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

		sx_xunlock(&proctree_lock);
		proctree_locked = false;

	sendsig:
		MPASS(!proctree_locked);

		/*
		 * Clear the pending event for the thread that just
		 * reported its event (p_xthread).  This may not be
		 * the thread passed to PT_CONTINUE, PT_STEP, etc. if
		 * the debugger is resuming a different thread.
		 *
		 * Deliver any pending signal via the reporting thread.
		 */
		MPASS(p->p_xthread != NULL);
		p->p_xthread->td_dbgflags &= ~TDB_XSIG;
		p->p_xthread->td_xsig = data;
		p->p_xthread = NULL;
		p->p_xsig = data;

		/*
		 * P_WKILLED is insurance that a PT_KILL/SIGKILL
		 * always works immediately, even if another thread is
		 * unsuspended first and attempts to handle a
		 * different signal or if the POSIX.1b style signal
		 * queue cannot accommodate any new signals.
		 */
		if (data == SIGKILL)
			proc_wkilled(p);

		/*
		 * Unsuspend all threads.  To leave a thread
		 * suspended, use PT_SUSPEND to suspend it before
		 * continuing the process.
		 */
		ptrace_unsuspend(p);
		break;
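	/*
	 * Usage note (illustrative): for the resume requests above, data
	 * is the signal delivered to the tracee on continue, with 0
	 * meaning none.  A hedged example that forwards an intercepted
	 * SIGINT instead of swallowing it:
	 *
	 *	waitpid(pid, &status, 0);
	 *	sig = (WSTOPSIG(status) == SIGINT) ? SIGINT : 0;
	 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, sig);
	 */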
1314 */ 1315 ptrace_unsuspend(p); 1316 break; 1317 1318 case PT_WRITE_I: 1319 case PT_WRITE_D: 1320 td2->td_dbgflags |= TDB_USERWR; 1321 PROC_UNLOCK(p); 1322 error = 0; 1323 if (proc_writemem(td, p, (off_t)(uintptr_t)addr, &data, 1324 sizeof(int)) != sizeof(int)) 1325 error = ENOMEM; 1326 else 1327 CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x", 1328 p->p_pid, addr, data); 1329 PROC_LOCK(p); 1330 break; 1331 1332 case PT_READ_I: 1333 case PT_READ_D: 1334 PROC_UNLOCK(p); 1335 error = tmp = 0; 1336 if (proc_readmem(td, p, (off_t)(uintptr_t)addr, &tmp, 1337 sizeof(int)) != sizeof(int)) 1338 error = ENOMEM; 1339 else 1340 CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x", 1341 p->p_pid, addr, tmp); 1342 td->td_retval[0] = tmp; 1343 PROC_LOCK(p); 1344 break; 1345 1346 case PT_IO: 1347 piod = addr; 1348 iov.iov_base = piod->piod_addr; 1349 iov.iov_len = piod->piod_len; 1350 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs; 1351 uio.uio_resid = piod->piod_len; 1352 uio.uio_iov = &iov; 1353 uio.uio_iovcnt = 1; 1354 uio.uio_segflg = UIO_USERSPACE; 1355 uio.uio_td = td; 1356 switch (piod->piod_op) { 1357 case PIOD_READ_D: 1358 case PIOD_READ_I: 1359 CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)", 1360 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1361 uio.uio_rw = UIO_READ; 1362 break; 1363 case PIOD_WRITE_D: 1364 case PIOD_WRITE_I: 1365 CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)", 1366 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1367 td2->td_dbgflags |= TDB_USERWR; 1368 uio.uio_rw = UIO_WRITE; 1369 break; 1370 default: 1371 error = EINVAL; 1372 goto out; 1373 } 1374 PROC_UNLOCK(p); 1375 error = proc_rwmem(p, &uio); 1376 piod->piod_len -= uio.uio_resid; 1377 PROC_LOCK(p); 1378 break; 1379 1380 case PT_KILL: 1381 CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid); 1382 data = SIGKILL; 1383 goto sendsig; /* in PT_CONTINUE above */ 1384 1385 case PT_SETREGS: 1386 CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid, 1387 p->p_pid); 1388 td2->td_dbgflags |= TDB_USERWR; 1389 error = PROC_WRITE(regs, td2, addr); 1390 break; 1391 1392 case PT_GETREGS: 1393 CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid, 1394 p->p_pid); 1395 error = PROC_READ(regs, td2, addr); 1396 break; 1397 1398 case PT_SETFPREGS: 1399 CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid, 1400 p->p_pid); 1401 td2->td_dbgflags |= TDB_USERWR; 1402 error = PROC_WRITE(fpregs, td2, addr); 1403 break; 1404 1405 case PT_GETFPREGS: 1406 CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid, 1407 p->p_pid); 1408 error = PROC_READ(fpregs, td2, addr); 1409 break; 1410 1411 case PT_SETDBREGS: 1412 CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid, 1413 p->p_pid); 1414 td2->td_dbgflags |= TDB_USERWR; 1415 error = PROC_WRITE(dbregs, td2, addr); 1416 break; 1417 1418 case PT_GETDBREGS: 1419 CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid, 1420 p->p_pid); 1421 error = PROC_READ(dbregs, td2, addr); 1422 break; 1423 1424 case PT_SETREGSET: 1425 CTR2(KTR_PTRACE, "PT_SETREGSET: tid %d (pid %d)", td2->td_tid, 1426 p->p_pid); 1427 error = proc_write_regset(td2, data, addr); 1428 break; 1429 1430 case PT_GETREGSET: 1431 CTR2(KTR_PTRACE, "PT_GETREGSET: tid %d (pid %d)", td2->td_tid, 1432 p->p_pid); 1433 error = proc_read_regset(td2, data, addr); 1434 break; 1435 1436 case PT_LWPINFO: 1437 if (data <= 0 || data > sizeof(*pl)) { 1438 error = EINVAL; 1439 break; 1440 } 1441 pl = addr; 1442 bzero(pl, sizeof(*pl)); 1443 pl->pl_lwpid = td2->td_tid; 1444 pl->pl_event = PL_EVENT_NONE; 1445 
	case PT_LWPINFO:
		if (data <= 0 || data > sizeof(*pl)) {
			error = EINVAL;
			break;
		}
		pl = addr;
		bzero(pl, sizeof(*pl));
		pl->pl_lwpid = td2->td_tid;
		pl->pl_event = PL_EVENT_NONE;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			if (td2->td_si.si_signo != 0 &&
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)) {
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_si;
			}
		}
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		if (td2->td_dbgflags & TDB_FORK) {
			pl->pl_flags |= PL_FLAG_FORKED;
			pl->pl_child_pid = td2->td_dbg_forked;
			if (td2->td_dbgflags & TDB_VFORK)
				pl->pl_flags |= PL_FLAG_VFORKED;
		} else if ((td2->td_dbgflags & (TDB_SCX | TDB_VFORK)) ==
		    TDB_VFORK)
			pl->pl_flags |= PL_FLAG_VFORK_DONE;
		if (td2->td_dbgflags & TDB_CHILD)
			pl->pl_flags |= PL_FLAG_CHILD;
		if (td2->td_dbgflags & TDB_BORN)
			pl->pl_flags |= PL_FLAG_BORN;
		if (td2->td_dbgflags & TDB_EXIT)
			pl->pl_flags |= PL_FLAG_EXITED;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		strcpy(pl->pl_tdname, td2->td_name);
		if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) {
			pl->pl_syscall_code = td2->td_sa.code;
			pl->pl_syscall_narg = td2->td_sa.callp->sy_narg;
		} else {
			pl->pl_syscall_code = 0;
			pl->pl_syscall_narg = 0;
		}
		CTR6(KTR_PTRACE,
		    "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d",
		    td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags,
		    pl->pl_child_pid, pl->pl_syscall_code);
		break;
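	/*
	 * Note (illustrative): PT_LWPINFO accepts any size up to
	 * sizeof(struct ptrace_lwpinfo), so a debugger built against an
	 * older, shorter layout keeps working; pl_siginfo is only filled
	 * in when the caller's buffer is large enough to hold it.
	 *
	 *	struct ptrace_lwpinfo pl;
	 *
	 *	ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl));
	 *	if ((pl.pl_flags & PL_FLAG_SI) != 0)
	 *		handle_siginfo(&pl.pl_siginfo);	// hypothetical helper
	 */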
	case PT_GETNUMLWPS:
		CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid,
		    p->p_numthreads);
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d",
		    p->p_pid, data, p->p_numthreads);
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d",
		    p->p_pid, p->p_vmspace->vm_map.timestamp);
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
		error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	case PT_COREDUMP:
		pc = addr;
		CTR2(KTR_PTRACE, "PT_COREDUMP: pid %d, fd %d",
		    p->p_pid, pc->pc_fd);

		if ((pc->pc_flags & ~(PC_COMPRESS | PC_ALL)) != 0) {
			error = EINVAL;
			break;
		}
		PROC_UNLOCK(p);

		tcq = malloc(sizeof(*tcq), M_TEMP, M_WAITOK | M_ZERO);
		fp = NULL;
		error = fget_write(td, pc->pc_fd, &cap_write_rights, &fp);
		if (error != 0)
			goto coredump_cleanup_nofp;
		if (fp->f_type != DTYPE_VNODE || fp->f_vnode->v_type != VREG) {
			error = EPIPE;
			goto coredump_cleanup;
		}

		PROC_LOCK(p);
		error = proc_can_ptrace(td, p);
		if (error != 0)
			goto coredump_cleanup_locked;

		td2 = ptrace_sel_coredump_thread(p);
		if (td2 == NULL) {
			error = EBUSY;
			goto coredump_cleanup_locked;
		}
		KASSERT((td2->td_dbgflags & TDB_COREDUMPRQ) == 0,
		    ("proc %d tid %d req coredump", p->p_pid, td2->td_tid));

		tcq->tc_vp = fp->f_vnode;
		tcq->tc_limit = pc->pc_limit == 0 ? OFF_MAX : pc->pc_limit;
		tcq->tc_flags = SVC_PT_COREDUMP;
		if ((pc->pc_flags & PC_COMPRESS) == 0)
			tcq->tc_flags |= SVC_NOCOMPRESS;
		if ((pc->pc_flags & PC_ALL) != 0)
			tcq->tc_flags |= SVC_ALL;
		td2->td_coredump = tcq;
		td2->td_dbgflags |= TDB_COREDUMPRQ;
		thread_run_flash(td2);
		while ((td2->td_dbgflags & TDB_COREDUMPRQ) != 0)
			msleep(p, &p->p_mtx, PPAUSE, "crdmp", 0);
		error = tcq->tc_error;
coredump_cleanup_locked:
		PROC_UNLOCK(p);
coredump_cleanup:
		fdrop(fp, td);
coredump_cleanup_nofp:
		free(tcq, M_TEMP);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}
out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	if (p2_req_set) {
		if ((p->p_flag2 & P2_PTRACEREQ) != 0)
			wakeup(&p->p_flag2);
		p->p_flag2 &= ~P2_PTRACEREQ;
	}
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE
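/*
 * A hedged userland sketch of PT_COREDUMP (the output file name is an
 * assumption of the example):
 *
 *	struct ptrace_coredump pc;
 *
 *	pc.pc_fd = open("prog.core", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *	pc.pc_flags = PC_ALL;		// dump all map entries
 *	pc.pc_limit = 0;		// 0 means no size limit
 *	if (ptrace(PT_COREDUMP, pid, (caddr_t)&pc, sizeof(pc)) == -1)
 *		err(1, "PT_COREDUMP");
 */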