1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 1994, Sean Eric Fagan 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Sean Eric Fagan. 18 * 4. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/ktr.h> 37 #include <sys/limits.h> 38 #include <sys/lock.h> 39 #include <sys/mman.h> 40 #include <sys/mutex.h> 41 #include <sys/reg.h> 42 #include <sys/syscallsubr.h> 43 #include <sys/sysent.h> 44 #include <sys/sysproto.h> 45 #include <sys/priv.h> 46 #include <sys/proc.h> 47 #include <sys/vnode.h> 48 #include <sys/ptrace.h> 49 #include <sys/rwlock.h> 50 #include <sys/sx.h> 51 #include <sys/malloc.h> 52 #include <sys/signalvar.h> 53 #include <sys/caprights.h> 54 #include <sys/filedesc.h> 55 56 #include <security/audit/audit.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_extern.h> 61 #include <vm/vm_map.h> 62 #include <vm/vm_kern.h> 63 #include <vm/vm_object.h> 64 #include <vm/vm_page.h> 65 #include <vm/vm_param.h> 66 67 #ifdef COMPAT_FREEBSD32 68 #include <sys/procfs.h> 69 #endif 70 71 /* Assert it's safe to unlock a process, e.g. to allocate working memory */ 72 #define PROC_ASSERT_TRACEREQ(p) MPASS(((p)->p_flag2 & P2_PTRACEREQ) != 0) 73 74 /* 75 * Functions implemented below: 76 * 77 * proc_read_regs(proc, regs) 78 * Get the current user-visible register set from the process 79 * and copy it into the regs structure (<machine/reg.h>). 80 * The process is stopped at the time read_regs is called. 81 * 82 * proc_write_regs(proc, regs) 83 * Update the current register set from the passed in regs 84 * structure. Take care to avoid clobbering special CPU 85 * registers or privileged bits in the PSL. 86 * Depending on the architecture this may have fix-up work to do, 87 * especially if the IAR or PCW are modified. 88 * The process is stopped at the time write_regs is called. 89 * 90 * proc_read_fpregs, proc_write_fpregs 91 * deal with the floating point register set, otherwise as above. 92 * 93 * proc_read_dbregs, proc_write_dbregs 94 * deal with the processor debug register set, otherwise as above. 95 * 96 * proc_sstep(proc) 97 * Arrange for the process to trap after executing a single instruction. 98 */ 99 100 int 101 proc_read_regs(struct thread *td, struct reg *regs) 102 { 103 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 104 return (fill_regs(td, regs)); 105 } 106 107 int 108 proc_write_regs(struct thread *td, struct reg *regs) 109 { 110 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 111 return (set_regs(td, regs)); 112 } 113 114 int 115 proc_read_dbregs(struct thread *td, struct dbreg *dbregs) 116 { 117 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 118 return (fill_dbregs(td, dbregs)); 119 } 120 121 int 122 proc_write_dbregs(struct thread *td, struct dbreg *dbregs) 123 { 124 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 125 return (set_dbregs(td, dbregs)); 126 } 127 128 /* 129 * Ptrace doesn't support fpregs at all, and there are no security holes 130 * or translations for fpregs, so we can just copy them. 131 */ 132 int 133 proc_read_fpregs(struct thread *td, struct fpreg *fpregs) 134 { 135 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 136 return (fill_fpregs(td, fpregs)); 137 } 138 139 int 140 proc_write_fpregs(struct thread *td, struct fpreg *fpregs) 141 { 142 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 143 return (set_fpregs(td, fpregs)); 144 } 145 146 static struct regset * 147 proc_find_regset(struct thread *td, int note) 148 { 149 struct regset **regsetp, **regset_end, *regset; 150 struct sysentvec *sv; 151 152 sv = td->td_proc->p_sysent; 153 regsetp = sv->sv_regset_begin; 154 if (regsetp == NULL) 155 return (NULL); 156 regset_end = sv->sv_regset_end; 157 MPASS(regset_end != NULL); 158 for (; regsetp < regset_end; regsetp++) { 159 regset = *regsetp; 160 if (regset->note != note) 161 continue; 162 163 return (regset); 164 } 165 166 return (NULL); 167 } 168 169 static int 170 proc_read_regset(struct thread *td, int note, struct iovec *iov) 171 { 172 struct regset *regset; 173 struct proc *p; 174 void *buf; 175 size_t size; 176 int error; 177 178 regset = proc_find_regset(td, note); 179 if (regset == NULL) 180 return (EINVAL); 181 182 if (regset->get == NULL) 183 return (EINVAL); 184 185 size = regset->size; 186 /* 187 * The regset is dynamically sized, e.g. the size could change 188 * depending on the hardware, or may have a per-thread size. 189 */ 190 if (size == 0) { 191 if (!regset->get(regset, td, NULL, &size)) 192 return (EINVAL); 193 } 194 195 if (iov->iov_base == NULL) { 196 iov->iov_len = size; 197 if (iov->iov_len == 0) 198 return (EINVAL); 199 200 return (0); 201 } 202 203 /* The length is wrong, return an error */ 204 if (iov->iov_len != size) 205 return (EINVAL); 206 207 error = 0; 208 p = td->td_proc; 209 210 /* Drop the proc lock while allocating the temp buffer */ 211 PROC_ASSERT_TRACEREQ(p); 212 PROC_UNLOCK(p); 213 buf = malloc(size, M_TEMP, M_WAITOK); 214 PROC_LOCK(p); 215 216 if (!regset->get(regset, td, buf, &size)) { 217 error = EINVAL; 218 } else { 219 KASSERT(size == regset->size || regset->size == 0, 220 ("%s: Getter function changed the size", __func__)); 221 222 iov->iov_len = size; 223 PROC_UNLOCK(p); 224 error = copyout(buf, iov->iov_base, size); 225 PROC_LOCK(p); 226 } 227 228 free(buf, M_TEMP); 229 230 return (error); 231 } 232 233 static int 234 proc_write_regset(struct thread *td, int note, struct iovec *iov) 235 { 236 struct regset *regset; 237 struct proc *p; 238 void *buf; 239 size_t size; 240 int error; 241 242 regset = proc_find_regset(td, note); 243 if (regset == NULL) 244 return (EINVAL); 245 246 size = regset->size; 247 /* 248 * The regset is dynamically sized, e.g. the size could change 249 * depending on the hardware, or may have a per-thread size. 250 */ 251 if (size == 0) { 252 if (!regset->get(regset, td, NULL, &size)) 253 return (EINVAL); 254 } 255 256 /* The length is wrong, return an error */ 257 if (iov->iov_len != size) 258 return (EINVAL); 259 260 if (regset->set == NULL) 261 return (EINVAL); 262 263 p = td->td_proc; 264 265 /* Drop the proc lock while allocating the temp buffer */ 266 PROC_ASSERT_TRACEREQ(p); 267 PROC_UNLOCK(p); 268 buf = malloc(size, M_TEMP, M_WAITOK); 269 error = copyin(iov->iov_base, buf, size); 270 PROC_LOCK(p); 271 272 if (error == 0) { 273 if (!regset->set(regset, td, buf, size)) { 274 error = EINVAL; 275 } 276 } 277 278 free(buf, M_TEMP); 279 280 return (error); 281 } 282 283 #ifdef COMPAT_FREEBSD32 284 /* For 32 bit binaries, we need to expose the 32 bit regs layouts. */ 285 int 286 proc_read_regs32(struct thread *td, struct reg32 *regs32) 287 { 288 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 289 return (fill_regs32(td, regs32)); 290 } 291 292 int 293 proc_write_regs32(struct thread *td, struct reg32 *regs32) 294 { 295 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 296 return (set_regs32(td, regs32)); 297 } 298 299 int 300 proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 301 { 302 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 303 return (fill_dbregs32(td, dbregs32)); 304 } 305 306 int 307 proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 308 { 309 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 310 return (set_dbregs32(td, dbregs32)); 311 } 312 313 int 314 proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 315 { 316 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 317 return (fill_fpregs32(td, fpregs32)); 318 } 319 320 int 321 proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 322 { 323 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 324 return (set_fpregs32(td, fpregs32)); 325 } 326 #endif 327 328 int 329 proc_sstep(struct thread *td) 330 { 331 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 332 return (ptrace_single_step(td)); 333 } 334 335 int 336 proc_rwmem(struct proc *p, struct uio *uio) 337 { 338 vm_map_t map; 339 vm_offset_t pageno; /* page number */ 340 vm_prot_t reqprot; 341 int error, fault_flags, page_offset, writing; 342 343 /* 344 * Make sure that the process' vmspace remains live. 345 */ 346 if (p != curproc) 347 PROC_ASSERT_HELD(p); 348 PROC_LOCK_ASSERT(p, MA_NOTOWNED); 349 350 /* 351 * The map we want... 352 */ 353 map = &p->p_vmspace->vm_map; 354 355 /* 356 * If we are writing, then we request vm_fault() to create a private 357 * copy of each page. Since these copies will not be writeable by the 358 * process, we must explicitly request that they be dirtied. 359 */ 360 writing = uio->uio_rw == UIO_WRITE; 361 reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ; 362 fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL; 363 364 if (writing) { 365 error = priv_check_cred(p->p_ucred, PRIV_PROC_MEM_WRITE); 366 if (error) 367 return (error); 368 } 369 370 /* 371 * Only map in one page at a time. We don't have to, but it 372 * makes things easier. This way is trivial - right? 373 */ 374 do { 375 vm_offset_t uva; 376 u_int len; 377 vm_page_t m; 378 379 uva = (vm_offset_t)uio->uio_offset; 380 381 /* 382 * Get the page number of this segment. 383 */ 384 pageno = trunc_page(uva); 385 page_offset = uva - pageno; 386 387 /* 388 * How many bytes to copy 389 */ 390 len = MIN(PAGE_SIZE - page_offset, uio->uio_resid); 391 392 /* 393 * Fault and hold the page on behalf of the process. 394 */ 395 error = vm_fault(map, pageno, reqprot, fault_flags, &m); 396 if (error != KERN_SUCCESS) { 397 if (error == KERN_RESOURCE_SHORTAGE) 398 error = ENOMEM; 399 else 400 error = EFAULT; 401 break; 402 } 403 404 /* 405 * Now do the i/o move. 406 */ 407 error = uiomove_fromphys(&m, page_offset, len, uio); 408 409 /* Make the I-cache coherent for breakpoints. */ 410 if (writing && error == 0) { 411 vm_map_lock_read(map); 412 if (vm_map_check_protection(map, pageno, pageno + 413 PAGE_SIZE, VM_PROT_EXECUTE)) 414 vm_sync_icache(map, uva, len); 415 vm_map_unlock_read(map); 416 } 417 418 /* 419 * Release the page. 420 */ 421 vm_page_unwire(m, PQ_ACTIVE); 422 423 } while (error == 0 && uio->uio_resid > 0); 424 425 return (error); 426 } 427 428 static ssize_t 429 proc_iop(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 430 size_t len, enum uio_rw rw) 431 { 432 struct iovec iov; 433 struct uio uio; 434 ssize_t slen; 435 436 MPASS(len < SSIZE_MAX); 437 slen = (ssize_t)len; 438 439 iov.iov_base = (caddr_t)buf; 440 iov.iov_len = len; 441 uio.uio_iov = &iov; 442 uio.uio_iovcnt = 1; 443 uio.uio_offset = va; 444 uio.uio_resid = slen; 445 uio.uio_segflg = UIO_SYSSPACE; 446 uio.uio_rw = rw; 447 uio.uio_td = td; 448 proc_rwmem(p, &uio); 449 if (uio.uio_resid == slen) 450 return (-1); 451 return (slen - uio.uio_resid); 452 } 453 454 ssize_t 455 proc_readmem(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 456 size_t len) 457 { 458 459 return (proc_iop(td, p, va, buf, len, UIO_READ)); 460 } 461 462 ssize_t 463 proc_writemem(struct thread *td, struct proc *p, vm_offset_t va, void *buf, 464 size_t len) 465 { 466 467 return (proc_iop(td, p, va, buf, len, UIO_WRITE)); 468 } 469 470 static int 471 ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve) 472 { 473 struct vattr vattr; 474 vm_map_t map; 475 vm_map_entry_t entry; 476 vm_object_t obj, tobj, lobj; 477 struct vmspace *vm; 478 struct vnode *vp; 479 char *freepath, *fullpath; 480 u_int pathlen; 481 int error, index; 482 483 error = 0; 484 obj = NULL; 485 486 vm = vmspace_acquire_ref(p); 487 map = &vm->vm_map; 488 vm_map_lock_read(map); 489 490 do { 491 KASSERT((map->header.eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 492 ("Submap in map header")); 493 index = 0; 494 VM_MAP_ENTRY_FOREACH(entry, map) { 495 if (index >= pve->pve_entry && 496 (entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 497 break; 498 index++; 499 } 500 if (index < pve->pve_entry) { 501 error = EINVAL; 502 break; 503 } 504 if (entry == &map->header) { 505 error = ENOENT; 506 break; 507 } 508 509 /* We got an entry. */ 510 pve->pve_entry = index + 1; 511 pve->pve_timestamp = map->timestamp; 512 pve->pve_start = entry->start; 513 pve->pve_end = entry->end - 1; 514 pve->pve_offset = entry->offset; 515 pve->pve_prot = entry->protection | 516 PROT_MAX(entry->max_protection); 517 518 /* Backing object's path needed? */ 519 if (pve->pve_pathlen == 0) 520 break; 521 522 pathlen = pve->pve_pathlen; 523 pve->pve_pathlen = 0; 524 525 obj = entry->object.vm_object; 526 if (obj != NULL) 527 VM_OBJECT_RLOCK(obj); 528 } while (0); 529 530 vm_map_unlock_read(map); 531 532 pve->pve_fsid = VNOVAL; 533 pve->pve_fileid = VNOVAL; 534 535 if (error == 0 && obj != NULL) { 536 lobj = obj; 537 for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) { 538 if (tobj != obj) 539 VM_OBJECT_RLOCK(tobj); 540 if (lobj != obj) 541 VM_OBJECT_RUNLOCK(lobj); 542 lobj = tobj; 543 pve->pve_offset += tobj->backing_object_offset; 544 } 545 vp = vm_object_vnode(lobj); 546 if (vp != NULL) 547 vref(vp); 548 if (lobj != obj) 549 VM_OBJECT_RUNLOCK(lobj); 550 VM_OBJECT_RUNLOCK(obj); 551 552 if (vp != NULL) { 553 freepath = NULL; 554 fullpath = NULL; 555 vn_fullpath(vp, &fullpath, &freepath); 556 vn_lock(vp, LK_SHARED | LK_RETRY); 557 if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) { 558 pve->pve_fileid = vattr.va_fileid; 559 pve->pve_fsid = vattr.va_fsid; 560 } 561 vput(vp); 562 563 if (fullpath != NULL) { 564 pve->pve_pathlen = strlen(fullpath) + 1; 565 if (pve->pve_pathlen <= pathlen) { 566 error = copyout(fullpath, pve->pve_path, 567 pve->pve_pathlen); 568 } else 569 error = ENAMETOOLONG; 570 } 571 if (freepath != NULL) 572 free(freepath, M_TEMP); 573 } 574 } 575 vmspace_free(vm); 576 if (error == 0) 577 CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p", 578 p->p_pid, pve->pve_entry, pve->pve_start); 579 580 return (error); 581 } 582 583 /* 584 * Process debugging system call. 585 */ 586 #ifndef _SYS_SYSPROTO_H_ 587 struct ptrace_args { 588 int req; 589 pid_t pid; 590 caddr_t addr; 591 int data; 592 }; 593 #endif 594 595 int 596 sys_ptrace(struct thread *td, struct ptrace_args *uap) 597 { 598 /* 599 * XXX this obfuscation is to reduce stack usage, but the register 600 * structs may be too large to put on the stack anyway. 601 */ 602 union { 603 struct ptrace_io_desc piod; 604 struct ptrace_lwpinfo pl; 605 struct ptrace_vm_entry pve; 606 struct ptrace_coredump pc; 607 struct ptrace_sc_remote sr; 608 struct dbreg dbreg; 609 struct fpreg fpreg; 610 struct reg reg; 611 struct iovec vec; 612 syscallarg_t args[nitems(td->td_sa.args)]; 613 struct ptrace_sc_ret psr; 614 int ptevents; 615 } r; 616 syscallarg_t pscr_args[nitems(td->td_sa.args)]; 617 void *addr; 618 int error; 619 620 if (!allow_ptrace) 621 return (ENOSYS); 622 error = 0; 623 624 AUDIT_ARG_PID(uap->pid); 625 AUDIT_ARG_CMD(uap->req); 626 AUDIT_ARG_VALUE(uap->data); 627 addr = &r; 628 switch (uap->req) { 629 case PT_GET_EVENT_MASK: 630 case PT_LWPINFO: 631 case PT_GET_SC_ARGS: 632 case PT_GET_SC_RET: 633 break; 634 case PT_GETREGS: 635 bzero(&r.reg, sizeof(r.reg)); 636 break; 637 case PT_GETFPREGS: 638 bzero(&r.fpreg, sizeof(r.fpreg)); 639 break; 640 case PT_GETDBREGS: 641 bzero(&r.dbreg, sizeof(r.dbreg)); 642 break; 643 case PT_GETREGSET: 644 case PT_SETREGSET: 645 error = copyin(uap->addr, &r.vec, sizeof(r.vec)); 646 break; 647 case PT_SETREGS: 648 error = copyin(uap->addr, &r.reg, sizeof(r.reg)); 649 break; 650 case PT_SETFPREGS: 651 error = copyin(uap->addr, &r.fpreg, sizeof(r.fpreg)); 652 break; 653 case PT_SETDBREGS: 654 error = copyin(uap->addr, &r.dbreg, sizeof(r.dbreg)); 655 break; 656 case PT_SET_EVENT_MASK: 657 if (uap->data != sizeof(r.ptevents)) 658 error = EINVAL; 659 else 660 error = copyin(uap->addr, &r.ptevents, uap->data); 661 break; 662 case PT_IO: 663 error = copyin(uap->addr, &r.piod, sizeof(r.piod)); 664 break; 665 case PT_VM_ENTRY: 666 error = copyin(uap->addr, &r.pve, sizeof(r.pve)); 667 break; 668 case PT_COREDUMP: 669 if (uap->data != sizeof(r.pc)) 670 error = EINVAL; 671 else 672 error = copyin(uap->addr, &r.pc, uap->data); 673 break; 674 case PT_SC_REMOTE: 675 if (uap->data != sizeof(r.sr)) { 676 error = EINVAL; 677 break; 678 } 679 error = copyin(uap->addr, &r.sr, uap->data); 680 if (error != 0) 681 break; 682 if (r.sr.pscr_nargs > nitems(td->td_sa.args)) { 683 error = EINVAL; 684 break; 685 } 686 error = copyin(r.sr.pscr_args, pscr_args, 687 sizeof(u_long) * r.sr.pscr_nargs); 688 if (error != 0) 689 break; 690 r.sr.pscr_args = pscr_args; 691 break; 692 default: 693 addr = uap->addr; 694 break; 695 } 696 if (error) 697 return (error); 698 699 error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data); 700 if (error) 701 return (error); 702 703 switch (uap->req) { 704 case PT_VM_ENTRY: 705 error = copyout(&r.pve, uap->addr, sizeof(r.pve)); 706 break; 707 case PT_IO: 708 error = copyout(&r.piod, uap->addr, sizeof(r.piod)); 709 break; 710 case PT_GETREGS: 711 error = copyout(&r.reg, uap->addr, sizeof(r.reg)); 712 break; 713 case PT_GETFPREGS: 714 error = copyout(&r.fpreg, uap->addr, sizeof(r.fpreg)); 715 break; 716 case PT_GETDBREGS: 717 error = copyout(&r.dbreg, uap->addr, sizeof(r.dbreg)); 718 break; 719 case PT_GETREGSET: 720 error = copyout(&r.vec, uap->addr, sizeof(r.vec)); 721 break; 722 case PT_GET_EVENT_MASK: 723 /* NB: The size in uap->data is validated in kern_ptrace(). */ 724 error = copyout(&r.ptevents, uap->addr, uap->data); 725 break; 726 case PT_LWPINFO: 727 /* NB: The size in uap->data is validated in kern_ptrace(). */ 728 error = copyout(&r.pl, uap->addr, uap->data); 729 break; 730 case PT_GET_SC_ARGS: 731 error = copyout(r.args, uap->addr, MIN(uap->data, 732 sizeof(r.args))); 733 break; 734 case PT_GET_SC_RET: 735 error = copyout(&r.psr, uap->addr, MIN(uap->data, 736 sizeof(r.psr))); 737 break; 738 case PT_SC_REMOTE: 739 error = copyout(&r.sr.pscr_ret, uap->addr + 740 offsetof(struct ptrace_sc_remote, pscr_ret), 741 sizeof(r.sr.pscr_ret)); 742 break; 743 } 744 745 return (error); 746 } 747 748 #ifdef COMPAT_FREEBSD32 749 /* 750 * PROC_READ(regs, td2, addr); 751 * becomes either: 752 * proc_read_regs(td2, addr); 753 * or 754 * proc_read_regs32(td2, addr); 755 * .. except this is done at runtime. There is an additional 756 * complication in that PROC_WRITE disallows 32 bit consumers 757 * from writing to 64 bit address space targets. 758 */ 759 #define PROC_READ(w, t, a) wrap32 ? \ 760 proc_read_ ## w ## 32(t, a) : \ 761 proc_read_ ## w (t, a) 762 #define PROC_WRITE(w, t, a) wrap32 ? \ 763 (safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \ 764 proc_write_ ## w (t, a) 765 #else 766 #define PROC_READ(w, t, a) proc_read_ ## w (t, a) 767 #define PROC_WRITE(w, t, a) proc_write_ ## w (t, a) 768 #endif 769 770 void 771 proc_set_traced(struct proc *p, bool stop) 772 { 773 774 sx_assert(&proctree_lock, SX_XLOCKED); 775 PROC_LOCK_ASSERT(p, MA_OWNED); 776 p->p_flag |= P_TRACED; 777 if (stop) 778 p->p_flag2 |= P2_PTRACE_FSTP; 779 p->p_ptevents = PTRACE_DEFAULT; 780 } 781 782 void 783 ptrace_unsuspend(struct proc *p) 784 { 785 PROC_LOCK_ASSERT(p, MA_OWNED); 786 787 PROC_SLOCK(p); 788 p->p_flag &= ~(P_STOPPED_TRACE | P_STOPPED_SIG | P_WAITED); 789 thread_unsuspend(p); 790 PROC_SUNLOCK(p); 791 itimer_proc_continue(p); 792 kqtimer_proc_continue(p); 793 } 794 795 static int 796 proc_can_ptrace(struct thread *td, struct proc *p) 797 { 798 int error; 799 800 PROC_LOCK_ASSERT(p, MA_OWNED); 801 802 if ((p->p_flag & P_WEXIT) != 0) 803 return (ESRCH); 804 805 if ((error = p_cansee(td, p)) != 0) 806 return (error); 807 if ((error = p_candebug(td, p)) != 0) 808 return (error); 809 810 /* not being traced... */ 811 if ((p->p_flag & P_TRACED) == 0) 812 return (EPERM); 813 814 /* not being traced by YOU */ 815 if (p->p_pptr != td->td_proc) 816 return (EBUSY); 817 818 /* not currently stopped */ 819 if ((p->p_flag & P_STOPPED_TRACE) == 0 || 820 p->p_suspcount != p->p_numthreads || 821 (p->p_flag & P_WAITED) == 0) 822 return (EBUSY); 823 824 return (0); 825 } 826 827 static struct thread * 828 ptrace_sel_coredump_thread(struct proc *p) 829 { 830 struct thread *td2; 831 832 PROC_LOCK_ASSERT(p, MA_OWNED); 833 MPASS((p->p_flag & P_STOPPED_TRACE) != 0); 834 835 FOREACH_THREAD_IN_PROC(p, td2) { 836 if ((td2->td_dbgflags & TDB_SSWITCH) != 0) 837 return (td2); 838 } 839 return (NULL); 840 } 841 842 int 843 kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data) 844 { 845 struct iovec iov; 846 struct uio uio; 847 struct proc *curp, *p, *pp; 848 struct thread *td2 = NULL, *td3; 849 struct ptrace_io_desc *piod = NULL; 850 struct ptrace_lwpinfo *pl; 851 struct ptrace_sc_ret *psr; 852 struct ptrace_sc_remote *pscr; 853 struct file *fp; 854 struct ptrace_coredump *pc; 855 struct thr_coredump_req *tcq; 856 struct thr_syscall_req *tsr; 857 int error, num, tmp; 858 lwpid_t tid = 0, *buf; 859 #ifdef COMPAT_FREEBSD32 860 int wrap32 = 0, safe = 0; 861 #endif 862 bool proctree_locked, p2_req_set; 863 864 curp = td->td_proc; 865 proctree_locked = false; 866 p2_req_set = false; 867 868 /* Lock proctree before locking the process. */ 869 switch (req) { 870 case PT_TRACE_ME: 871 case PT_ATTACH: 872 case PT_STEP: 873 case PT_CONTINUE: 874 case PT_TO_SCE: 875 case PT_TO_SCX: 876 case PT_SYSCALL: 877 case PT_FOLLOW_FORK: 878 case PT_LWP_EVENTS: 879 case PT_GET_EVENT_MASK: 880 case PT_SET_EVENT_MASK: 881 case PT_DETACH: 882 case PT_GET_SC_ARGS: 883 sx_xlock(&proctree_lock); 884 proctree_locked = true; 885 break; 886 default: 887 break; 888 } 889 890 if (req == PT_TRACE_ME) { 891 p = td->td_proc; 892 PROC_LOCK(p); 893 } else { 894 if (pid <= PID_MAX) { 895 if ((p = pfind(pid)) == NULL) { 896 if (proctree_locked) 897 sx_xunlock(&proctree_lock); 898 return (ESRCH); 899 } 900 } else { 901 td2 = tdfind(pid, -1); 902 if (td2 == NULL) { 903 if (proctree_locked) 904 sx_xunlock(&proctree_lock); 905 return (ESRCH); 906 } 907 p = td2->td_proc; 908 tid = pid; 909 pid = p->p_pid; 910 } 911 } 912 AUDIT_ARG_PROCESS(p); 913 914 if ((p->p_flag & P_WEXIT) != 0) { 915 error = ESRCH; 916 goto fail; 917 } 918 if ((error = p_cansee(td, p)) != 0) 919 goto fail; 920 921 if ((error = p_candebug(td, p)) != 0) 922 goto fail; 923 924 /* 925 * System processes can't be debugged. 926 */ 927 if ((p->p_flag & P_SYSTEM) != 0) { 928 error = EINVAL; 929 goto fail; 930 } 931 932 if (tid == 0) { 933 if ((p->p_flag & P_STOPPED_TRACE) != 0) 934 td2 = p->p_xthread; 935 if (td2 == NULL) 936 td2 = FIRST_THREAD_IN_PROC(p); 937 tid = td2->td_tid; 938 } 939 940 #ifdef COMPAT_FREEBSD32 941 /* 942 * Test if we're a 32 bit client and what the target is. 943 * Set the wrap controls accordingly. 944 */ 945 if (SV_CURPROC_FLAG(SV_ILP32)) { 946 if (SV_PROC_FLAG(td2->td_proc, SV_ILP32)) 947 safe = 1; 948 wrap32 = 1; 949 } 950 #endif 951 /* 952 * Permissions check 953 */ 954 switch (req) { 955 case PT_TRACE_ME: 956 /* 957 * Always legal, when there is a parent process which 958 * could trace us. Otherwise, reject. 959 */ 960 if ((p->p_flag & P_TRACED) != 0) { 961 error = EBUSY; 962 goto fail; 963 } 964 if (p->p_pptr == initproc) { 965 error = EPERM; 966 goto fail; 967 } 968 break; 969 970 case PT_ATTACH: 971 /* Self */ 972 if (p == td->td_proc) { 973 error = EINVAL; 974 goto fail; 975 } 976 977 /* Already traced */ 978 if (p->p_flag & P_TRACED) { 979 error = EBUSY; 980 goto fail; 981 } 982 983 /* Can't trace an ancestor if you're being traced. */ 984 if (curp->p_flag & P_TRACED) { 985 for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) { 986 if (pp == p) { 987 error = EINVAL; 988 goto fail; 989 } 990 } 991 } 992 993 /* OK */ 994 break; 995 996 case PT_CLEARSTEP: 997 /* Allow thread to clear single step for itself */ 998 if (td->td_tid == tid) 999 break; 1000 1001 /* FALLTHROUGH */ 1002 default: 1003 /* 1004 * Check for ptrace eligibility before waiting for 1005 * holds to drain. 1006 */ 1007 error = proc_can_ptrace(td, p); 1008 if (error != 0) 1009 goto fail; 1010 1011 /* 1012 * Block parallel ptrace requests. Most important, do 1013 * not allow other thread in debugger to continue the 1014 * debuggee until coredump finished. 1015 */ 1016 while ((p->p_flag2 & P2_PTRACEREQ) != 0) { 1017 if (proctree_locked) 1018 sx_xunlock(&proctree_lock); 1019 error = msleep(&p->p_flag2, &p->p_mtx, PPAUSE | PCATCH | 1020 (proctree_locked ? PDROP : 0), "pptrace", 0); 1021 if (proctree_locked) { 1022 sx_xlock(&proctree_lock); 1023 PROC_LOCK(p); 1024 } 1025 if (error == 0 && td2->td_proc != p) 1026 error = ESRCH; 1027 if (error == 0) 1028 error = proc_can_ptrace(td, p); 1029 if (error != 0) 1030 goto fail; 1031 } 1032 1033 /* Ok */ 1034 break; 1035 } 1036 1037 /* 1038 * Keep this process around and request parallel ptrace() 1039 * request to wait until we finish this request. 1040 */ 1041 MPASS((p->p_flag2 & P2_PTRACEREQ) == 0); 1042 p->p_flag2 |= P2_PTRACEREQ; 1043 p2_req_set = true; 1044 _PHOLD(p); 1045 1046 /* 1047 * Actually do the requests 1048 */ 1049 1050 td->td_retval[0] = 0; 1051 1052 switch (req) { 1053 case PT_TRACE_ME: 1054 /* set my trace flag and "owner" so it can read/write me */ 1055 proc_set_traced(p, false); 1056 if (p->p_flag & P_PPWAIT) 1057 p->p_flag |= P_PPTRACE; 1058 CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid); 1059 break; 1060 1061 case PT_ATTACH: 1062 /* security check done above */ 1063 /* 1064 * It would be nice if the tracing relationship was separate 1065 * from the parent relationship but that would require 1066 * another set of links in the proc struct or for "wait" 1067 * to scan the entire proc table. To make life easier, 1068 * we just re-parent the process we're trying to trace. 1069 * The old parent is remembered so we can put things back 1070 * on a "detach". 1071 */ 1072 proc_set_traced(p, true); 1073 proc_reparent(p, td->td_proc, false); 1074 CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid, 1075 p->p_oppid); 1076 1077 sx_xunlock(&proctree_lock); 1078 proctree_locked = false; 1079 MPASS(p->p_xthread == NULL); 1080 MPASS((p->p_flag & P_STOPPED_TRACE) == 0); 1081 1082 /* 1083 * If already stopped due to a stop signal, clear the 1084 * existing stop before triggering a traced SIGSTOP. 1085 */ 1086 if ((p->p_flag & P_STOPPED_SIG) != 0) { 1087 PROC_SLOCK(p); 1088 p->p_flag &= ~(P_STOPPED_SIG | P_WAITED); 1089 thread_unsuspend(p); 1090 PROC_SUNLOCK(p); 1091 } 1092 1093 kern_psignal(p, SIGSTOP); 1094 break; 1095 1096 case PT_CLEARSTEP: 1097 CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid, 1098 p->p_pid); 1099 error = ptrace_clear_single_step(td2); 1100 break; 1101 1102 case PT_SETSTEP: 1103 CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid, 1104 p->p_pid); 1105 error = ptrace_single_step(td2); 1106 break; 1107 1108 case PT_SUSPEND: 1109 CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid, 1110 p->p_pid); 1111 td2->td_dbgflags |= TDB_SUSPEND; 1112 ast_sched(td2, TDA_SUSPEND); 1113 break; 1114 1115 case PT_RESUME: 1116 CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid, 1117 p->p_pid); 1118 td2->td_dbgflags &= ~TDB_SUSPEND; 1119 break; 1120 1121 case PT_FOLLOW_FORK: 1122 CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid, 1123 p->p_ptevents & PTRACE_FORK ? "enabled" : "disabled", 1124 data ? "enabled" : "disabled"); 1125 if (data) 1126 p->p_ptevents |= PTRACE_FORK; 1127 else 1128 p->p_ptevents &= ~PTRACE_FORK; 1129 break; 1130 1131 case PT_LWP_EVENTS: 1132 CTR3(KTR_PTRACE, "PT_LWP_EVENTS: pid %d %s -> %s", p->p_pid, 1133 p->p_ptevents & PTRACE_LWP ? "enabled" : "disabled", 1134 data ? "enabled" : "disabled"); 1135 if (data) 1136 p->p_ptevents |= PTRACE_LWP; 1137 else 1138 p->p_ptevents &= ~PTRACE_LWP; 1139 break; 1140 1141 case PT_GET_EVENT_MASK: 1142 if (data != sizeof(p->p_ptevents)) { 1143 error = EINVAL; 1144 break; 1145 } 1146 CTR2(KTR_PTRACE, "PT_GET_EVENT_MASK: pid %d mask %#x", p->p_pid, 1147 p->p_ptevents); 1148 *(int *)addr = p->p_ptevents; 1149 break; 1150 1151 case PT_SET_EVENT_MASK: 1152 if (data != sizeof(p->p_ptevents)) { 1153 error = EINVAL; 1154 break; 1155 } 1156 tmp = *(int *)addr; 1157 if ((tmp & ~(PTRACE_EXEC | PTRACE_SCE | PTRACE_SCX | 1158 PTRACE_FORK | PTRACE_LWP | PTRACE_VFORK)) != 0) { 1159 error = EINVAL; 1160 break; 1161 } 1162 CTR3(KTR_PTRACE, "PT_SET_EVENT_MASK: pid %d mask %#x -> %#x", 1163 p->p_pid, p->p_ptevents, tmp); 1164 p->p_ptevents = tmp; 1165 break; 1166 1167 case PT_GET_SC_ARGS: 1168 CTR1(KTR_PTRACE, "PT_GET_SC_ARGS: pid %d", p->p_pid); 1169 if (((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) == 0 && 1170 td2->td_sa.code == 0) 1171 #ifdef COMPAT_FREEBSD32 1172 || (wrap32 && !safe) 1173 #endif 1174 ) { 1175 error = EINVAL; 1176 break; 1177 } 1178 bzero(addr, sizeof(td2->td_sa.args)); 1179 /* See the explanation in linux_ptrace_get_syscall_info(). */ 1180 bcopy(td2->td_sa.args, addr, SV_PROC_ABI(td->td_proc) == 1181 SV_ABI_LINUX ? sizeof(td2->td_sa.args) : 1182 td2->td_sa.callp->sy_narg * sizeof(syscallarg_t)); 1183 break; 1184 1185 case PT_GET_SC_RET: 1186 if ((td2->td_dbgflags & (TDB_SCX)) == 0 1187 #ifdef COMPAT_FREEBSD32 1188 || (wrap32 && !safe) 1189 #endif 1190 ) { 1191 error = EINVAL; 1192 break; 1193 } 1194 psr = addr; 1195 bzero(psr, sizeof(*psr)); 1196 psr->sr_error = td2->td_errno; 1197 if (psr->sr_error == 0) { 1198 psr->sr_retval[0] = td2->td_retval[0]; 1199 psr->sr_retval[1] = td2->td_retval[1]; 1200 } 1201 CTR4(KTR_PTRACE, 1202 "PT_GET_SC_RET: pid %d error %d retval %#lx,%#lx", 1203 p->p_pid, psr->sr_error, psr->sr_retval[0], 1204 psr->sr_retval[1]); 1205 break; 1206 1207 case PT_STEP: 1208 case PT_CONTINUE: 1209 case PT_TO_SCE: 1210 case PT_TO_SCX: 1211 case PT_SYSCALL: 1212 case PT_DETACH: 1213 /* Zero means do not send any signal */ 1214 if (data < 0 || data > _SIG_MAXSIG) { 1215 error = EINVAL; 1216 break; 1217 } 1218 1219 switch (req) { 1220 case PT_STEP: 1221 CTR3(KTR_PTRACE, "PT_STEP: tid %d (pid %d), sig = %d", 1222 td2->td_tid, p->p_pid, data); 1223 error = ptrace_single_step(td2); 1224 if (error) 1225 goto out; 1226 break; 1227 case PT_CONTINUE: 1228 case PT_TO_SCE: 1229 case PT_TO_SCX: 1230 case PT_SYSCALL: 1231 if (addr != (void *)1) { 1232 error = ptrace_set_pc(td2, 1233 (u_long)(uintfptr_t)addr); 1234 if (error) 1235 goto out; 1236 } 1237 switch (req) { 1238 case PT_TO_SCE: 1239 p->p_ptevents |= PTRACE_SCE; 1240 CTR4(KTR_PTRACE, 1241 "PT_TO_SCE: pid %d, events = %#x, PC = %#lx, sig = %d", 1242 p->p_pid, p->p_ptevents, 1243 (u_long)(uintfptr_t)addr, data); 1244 break; 1245 case PT_TO_SCX: 1246 p->p_ptevents |= PTRACE_SCX; 1247 CTR4(KTR_PTRACE, 1248 "PT_TO_SCX: pid %d, events = %#x, PC = %#lx, sig = %d", 1249 p->p_pid, p->p_ptevents, 1250 (u_long)(uintfptr_t)addr, data); 1251 break; 1252 case PT_SYSCALL: 1253 p->p_ptevents |= PTRACE_SYSCALL; 1254 CTR4(KTR_PTRACE, 1255 "PT_SYSCALL: pid %d, events = %#x, PC = %#lx, sig = %d", 1256 p->p_pid, p->p_ptevents, 1257 (u_long)(uintfptr_t)addr, data); 1258 break; 1259 case PT_CONTINUE: 1260 CTR3(KTR_PTRACE, 1261 "PT_CONTINUE: pid %d, PC = %#lx, sig = %d", 1262 p->p_pid, (u_long)(uintfptr_t)addr, data); 1263 break; 1264 } 1265 break; 1266 case PT_DETACH: 1267 /* 1268 * Clear P_TRACED before reparenting 1269 * a detached process back to its original 1270 * parent. Otherwise the debugee will be set 1271 * as an orphan of the debugger. 1272 */ 1273 p->p_flag &= ~(P_TRACED | P_WAITED); 1274 1275 /* 1276 * Reset the process parent. 1277 */ 1278 if (p->p_oppid != p->p_pptr->p_pid) { 1279 PROC_LOCK(p->p_pptr); 1280 sigqueue_take(p->p_ksi); 1281 PROC_UNLOCK(p->p_pptr); 1282 1283 pp = proc_realparent(p); 1284 proc_reparent(p, pp, false); 1285 if (pp == initproc) 1286 p->p_sigparent = SIGCHLD; 1287 CTR3(KTR_PTRACE, 1288 "PT_DETACH: pid %d reparented to pid %d, sig %d", 1289 p->p_pid, pp->p_pid, data); 1290 } else { 1291 CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d", 1292 p->p_pid, data); 1293 } 1294 1295 p->p_ptevents = 0; 1296 FOREACH_THREAD_IN_PROC(p, td3) { 1297 if ((td3->td_dbgflags & TDB_FSTP) != 0) { 1298 sigqueue_delete(&td3->td_sigqueue, 1299 SIGSTOP); 1300 } 1301 td3->td_dbgflags &= ~(TDB_XSIG | TDB_FSTP | 1302 TDB_SUSPEND | TDB_BORN); 1303 } 1304 1305 if ((p->p_flag2 & P2_PTRACE_FSTP) != 0) { 1306 sigqueue_delete(&p->p_sigqueue, SIGSTOP); 1307 p->p_flag2 &= ~P2_PTRACE_FSTP; 1308 } 1309 1310 /* should we send SIGCHLD? */ 1311 /* childproc_continued(p); */ 1312 break; 1313 } 1314 1315 sx_xunlock(&proctree_lock); 1316 proctree_locked = false; 1317 1318 sendsig: 1319 MPASS(!proctree_locked); 1320 1321 /* 1322 * Clear the pending event for the thread that just 1323 * reported its event (p_xthread), if any. This may 1324 * not be the thread passed to PT_CONTINUE, PT_STEP, 1325 * etc. if the debugger is resuming a different 1326 * thread. There might be no reporting thread if 1327 * the process was just attached. 1328 * 1329 * Deliver any pending signal via the reporting thread. 1330 */ 1331 if (p->p_xthread != NULL) { 1332 p->p_xthread->td_dbgflags &= ~TDB_XSIG; 1333 p->p_xthread->td_xsig = data; 1334 p->p_xthread = NULL; 1335 } 1336 p->p_xsig = data; 1337 1338 /* 1339 * P_WKILLED is insurance that a PT_KILL/SIGKILL 1340 * always works immediately, even if another thread is 1341 * unsuspended first and attempts to handle a 1342 * different signal or if the POSIX.1b style signal 1343 * queue cannot accommodate any new signals. 1344 */ 1345 if (data == SIGKILL) 1346 proc_wkilled(p); 1347 1348 /* 1349 * Unsuspend all threads. To leave a thread 1350 * suspended, use PT_SUSPEND to suspend it before 1351 * continuing the process. 1352 */ 1353 ptrace_unsuspend(p); 1354 break; 1355 1356 case PT_WRITE_I: 1357 case PT_WRITE_D: 1358 td2->td_dbgflags |= TDB_USERWR; 1359 PROC_UNLOCK(p); 1360 error = 0; 1361 if (proc_writemem(td, p, (off_t)(uintptr_t)addr, &data, 1362 sizeof(int)) != sizeof(int)) 1363 error = ENOMEM; 1364 else 1365 CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x", 1366 p->p_pid, addr, data); 1367 PROC_LOCK(p); 1368 break; 1369 1370 case PT_READ_I: 1371 case PT_READ_D: 1372 PROC_UNLOCK(p); 1373 error = tmp = 0; 1374 if (proc_readmem(td, p, (off_t)(uintptr_t)addr, &tmp, 1375 sizeof(int)) != sizeof(int)) 1376 error = ENOMEM; 1377 else 1378 CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x", 1379 p->p_pid, addr, tmp); 1380 td->td_retval[0] = tmp; 1381 PROC_LOCK(p); 1382 break; 1383 1384 case PT_IO: 1385 piod = addr; 1386 if (piod->piod_len > SSIZE_MAX) { 1387 error = EINVAL; 1388 goto out; 1389 } 1390 iov.iov_base = piod->piod_addr; 1391 iov.iov_len = piod->piod_len; 1392 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs; 1393 uio.uio_resid = piod->piod_len; 1394 uio.uio_iov = &iov; 1395 uio.uio_iovcnt = 1; 1396 uio.uio_segflg = UIO_USERSPACE; 1397 uio.uio_td = td; 1398 switch (piod->piod_op) { 1399 case PIOD_READ_D: 1400 case PIOD_READ_I: 1401 CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)", 1402 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1403 uio.uio_rw = UIO_READ; 1404 break; 1405 case PIOD_WRITE_D: 1406 case PIOD_WRITE_I: 1407 CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)", 1408 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid); 1409 td2->td_dbgflags |= TDB_USERWR; 1410 uio.uio_rw = UIO_WRITE; 1411 break; 1412 default: 1413 error = EINVAL; 1414 goto out; 1415 } 1416 PROC_UNLOCK(p); 1417 error = proc_rwmem(p, &uio); 1418 piod->piod_len -= uio.uio_resid; 1419 PROC_LOCK(p); 1420 break; 1421 1422 case PT_KILL: 1423 CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid); 1424 data = SIGKILL; 1425 goto sendsig; /* in PT_CONTINUE above */ 1426 1427 case PT_SETREGS: 1428 CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid, 1429 p->p_pid); 1430 td2->td_dbgflags |= TDB_USERWR; 1431 error = PROC_WRITE(regs, td2, addr); 1432 break; 1433 1434 case PT_GETREGS: 1435 CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid, 1436 p->p_pid); 1437 error = PROC_READ(regs, td2, addr); 1438 break; 1439 1440 case PT_SETFPREGS: 1441 CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid, 1442 p->p_pid); 1443 td2->td_dbgflags |= TDB_USERWR; 1444 error = PROC_WRITE(fpregs, td2, addr); 1445 break; 1446 1447 case PT_GETFPREGS: 1448 CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid, 1449 p->p_pid); 1450 error = PROC_READ(fpregs, td2, addr); 1451 break; 1452 1453 case PT_SETDBREGS: 1454 CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid, 1455 p->p_pid); 1456 td2->td_dbgflags |= TDB_USERWR; 1457 error = PROC_WRITE(dbregs, td2, addr); 1458 break; 1459 1460 case PT_GETDBREGS: 1461 CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid, 1462 p->p_pid); 1463 error = PROC_READ(dbregs, td2, addr); 1464 break; 1465 1466 case PT_SETREGSET: 1467 CTR2(KTR_PTRACE, "PT_SETREGSET: tid %d (pid %d)", td2->td_tid, 1468 p->p_pid); 1469 error = proc_write_regset(td2, data, addr); 1470 break; 1471 1472 case PT_GETREGSET: 1473 CTR2(KTR_PTRACE, "PT_GETREGSET: tid %d (pid %d)", td2->td_tid, 1474 p->p_pid); 1475 error = proc_read_regset(td2, data, addr); 1476 break; 1477 1478 case PT_LWPINFO: 1479 if (data <= 0 || data > sizeof(*pl)) { 1480 error = EINVAL; 1481 break; 1482 } 1483 pl = addr; 1484 bzero(pl, sizeof(*pl)); 1485 pl->pl_lwpid = td2->td_tid; 1486 pl->pl_event = PL_EVENT_NONE; 1487 pl->pl_flags = 0; 1488 if (td2->td_dbgflags & TDB_XSIG) { 1489 pl->pl_event = PL_EVENT_SIGNAL; 1490 if (td2->td_si.si_signo != 0 && 1491 data >= offsetof(struct ptrace_lwpinfo, pl_siginfo) 1492 + sizeof(pl->pl_siginfo)){ 1493 pl->pl_flags |= PL_FLAG_SI; 1494 pl->pl_siginfo = td2->td_si; 1495 } 1496 } 1497 if (td2->td_dbgflags & TDB_SCE) 1498 pl->pl_flags |= PL_FLAG_SCE; 1499 else if (td2->td_dbgflags & TDB_SCX) 1500 pl->pl_flags |= PL_FLAG_SCX; 1501 if (td2->td_dbgflags & TDB_EXEC) 1502 pl->pl_flags |= PL_FLAG_EXEC; 1503 if (td2->td_dbgflags & TDB_FORK) { 1504 pl->pl_flags |= PL_FLAG_FORKED; 1505 pl->pl_child_pid = td2->td_dbg_forked; 1506 if (td2->td_dbgflags & TDB_VFORK) 1507 pl->pl_flags |= PL_FLAG_VFORKED; 1508 } else if ((td2->td_dbgflags & (TDB_SCX | TDB_VFORK)) == 1509 TDB_VFORK) 1510 pl->pl_flags |= PL_FLAG_VFORK_DONE; 1511 if (td2->td_dbgflags & TDB_CHILD) 1512 pl->pl_flags |= PL_FLAG_CHILD; 1513 if (td2->td_dbgflags & TDB_BORN) 1514 pl->pl_flags |= PL_FLAG_BORN; 1515 if (td2->td_dbgflags & TDB_EXIT) 1516 pl->pl_flags |= PL_FLAG_EXITED; 1517 pl->pl_sigmask = td2->td_sigmask; 1518 pl->pl_siglist = td2->td_siglist; 1519 strcpy(pl->pl_tdname, td2->td_name); 1520 if (td2->td_sa.code != 0) { 1521 pl->pl_syscall_code = td2->td_sa.code; 1522 pl->pl_syscall_narg = td2->td_sa.callp->sy_narg; 1523 } 1524 CTR6(KTR_PTRACE, 1525 "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d", 1526 td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags, 1527 pl->pl_child_pid, pl->pl_syscall_code); 1528 break; 1529 1530 case PT_GETNUMLWPS: 1531 CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid, 1532 p->p_numthreads); 1533 td->td_retval[0] = p->p_numthreads; 1534 break; 1535 1536 case PT_GETLWPLIST: 1537 CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d", 1538 p->p_pid, data, p->p_numthreads); 1539 if (data <= 0) { 1540 error = EINVAL; 1541 break; 1542 } 1543 num = imin(p->p_numthreads, data); 1544 PROC_UNLOCK(p); 1545 buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK); 1546 tmp = 0; 1547 PROC_LOCK(p); 1548 FOREACH_THREAD_IN_PROC(p, td2) { 1549 if (tmp >= num) 1550 break; 1551 buf[tmp++] = td2->td_tid; 1552 } 1553 PROC_UNLOCK(p); 1554 error = copyout(buf, addr, tmp * sizeof(lwpid_t)); 1555 free(buf, M_TEMP); 1556 if (!error) 1557 td->td_retval[0] = tmp; 1558 PROC_LOCK(p); 1559 break; 1560 1561 case PT_VM_TIMESTAMP: 1562 CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d", 1563 p->p_pid, p->p_vmspace->vm_map.timestamp); 1564 td->td_retval[0] = p->p_vmspace->vm_map.timestamp; 1565 break; 1566 1567 case PT_VM_ENTRY: 1568 PROC_UNLOCK(p); 1569 error = ptrace_vm_entry(td, p, addr); 1570 PROC_LOCK(p); 1571 break; 1572 1573 case PT_COREDUMP: 1574 pc = addr; 1575 CTR2(KTR_PTRACE, "PT_COREDUMP: pid %d, fd %d", 1576 p->p_pid, pc->pc_fd); 1577 1578 if ((pc->pc_flags & ~(PC_COMPRESS | PC_ALL)) != 0) { 1579 error = EINVAL; 1580 break; 1581 } 1582 PROC_UNLOCK(p); 1583 1584 tcq = malloc(sizeof(*tcq), M_TEMP, M_WAITOK | M_ZERO); 1585 fp = NULL; 1586 error = fget_write(td, pc->pc_fd, &cap_write_rights, &fp); 1587 if (error != 0) 1588 goto coredump_cleanup_nofp; 1589 if (fp->f_type != DTYPE_VNODE || fp->f_vnode->v_type != VREG) { 1590 error = EPIPE; 1591 goto coredump_cleanup; 1592 } 1593 1594 PROC_LOCK(p); 1595 error = proc_can_ptrace(td, p); 1596 if (error != 0) 1597 goto coredump_cleanup_locked; 1598 1599 td2 = ptrace_sel_coredump_thread(p); 1600 if (td2 == NULL) { 1601 error = EBUSY; 1602 goto coredump_cleanup_locked; 1603 } 1604 KASSERT((td2->td_dbgflags & (TDB_COREDUMPREQ | 1605 TDB_SCREMOTEREQ)) == 0, 1606 ("proc %d tid %d req coredump", p->p_pid, td2->td_tid)); 1607 1608 tcq->tc_vp = fp->f_vnode; 1609 tcq->tc_limit = pc->pc_limit == 0 ? OFF_MAX : pc->pc_limit; 1610 tcq->tc_flags = SVC_PT_COREDUMP; 1611 if ((pc->pc_flags & PC_COMPRESS) == 0) 1612 tcq->tc_flags |= SVC_NOCOMPRESS; 1613 if ((pc->pc_flags & PC_ALL) != 0) 1614 tcq->tc_flags |= SVC_ALL; 1615 td2->td_remotereq = tcq; 1616 td2->td_dbgflags |= TDB_COREDUMPREQ; 1617 thread_run_flash(td2); 1618 while ((td2->td_dbgflags & TDB_COREDUMPREQ) != 0) 1619 msleep(p, &p->p_mtx, PPAUSE, "crdmp", 0); 1620 error = tcq->tc_error; 1621 coredump_cleanup_locked: 1622 PROC_UNLOCK(p); 1623 coredump_cleanup: 1624 fdrop(fp, td); 1625 coredump_cleanup_nofp: 1626 free(tcq, M_TEMP); 1627 PROC_LOCK(p); 1628 break; 1629 1630 case PT_SC_REMOTE: 1631 pscr = addr; 1632 CTR2(KTR_PTRACE, "PT_SC_REMOTE: pid %d, syscall %d", 1633 p->p_pid, pscr->pscr_syscall); 1634 if ((td2->td_dbgflags & TDB_BOUNDARY) == 0) { 1635 error = EBUSY; 1636 break; 1637 } 1638 PROC_UNLOCK(p); 1639 MPASS(pscr->pscr_nargs <= nitems(td->td_sa.args)); 1640 1641 tsr = malloc(sizeof(struct thr_syscall_req), M_TEMP, 1642 M_WAITOK | M_ZERO); 1643 1644 tsr->ts_sa.code = pscr->pscr_syscall; 1645 tsr->ts_nargs = pscr->pscr_nargs; 1646 memcpy(&tsr->ts_sa.args, pscr->pscr_args, 1647 sizeof(syscallarg_t) * tsr->ts_nargs); 1648 1649 PROC_LOCK(p); 1650 error = proc_can_ptrace(td, p); 1651 if (error != 0) { 1652 free(tsr, M_TEMP); 1653 break; 1654 } 1655 if (td2->td_proc != p) { 1656 free(tsr, M_TEMP); 1657 error = ESRCH; 1658 break; 1659 } 1660 KASSERT((td2->td_dbgflags & (TDB_COREDUMPREQ | 1661 TDB_SCREMOTEREQ)) == 0, 1662 ("proc %d tid %d req coredump", p->p_pid, td2->td_tid)); 1663 1664 td2->td_remotereq = tsr; 1665 td2->td_dbgflags |= TDB_SCREMOTEREQ; 1666 thread_run_flash(td2); 1667 while ((td2->td_dbgflags & TDB_SCREMOTEREQ) != 0) 1668 msleep(p, &p->p_mtx, PPAUSE, "pscrx", 0); 1669 error = 0; 1670 memcpy(&pscr->pscr_ret, &tsr->ts_ret, sizeof(tsr->ts_ret)); 1671 free(tsr, M_TEMP); 1672 break; 1673 1674 default: 1675 #ifdef __HAVE_PTRACE_MACHDEP 1676 if (req >= PT_FIRSTMACH) { 1677 PROC_UNLOCK(p); 1678 error = cpu_ptrace(td2, req, addr, data); 1679 PROC_LOCK(p); 1680 } else 1681 #endif 1682 /* Unknown request. */ 1683 error = EINVAL; 1684 break; 1685 } 1686 out: 1687 /* Drop our hold on this process now that the request has completed. */ 1688 _PRELE(p); 1689 fail: 1690 if (p2_req_set) { 1691 if ((p->p_flag2 & P2_PTRACEREQ) != 0) 1692 wakeup(&p->p_flag2); 1693 p->p_flag2 &= ~P2_PTRACEREQ; 1694 } 1695 PROC_UNLOCK(p); 1696 if (proctree_locked) 1697 sx_xunlock(&proctree_lock); 1698 return (error); 1699 } 1700 #undef PROC_READ 1701 #undef PROC_WRITE 1702