/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef COMPAT_IA32
#include <sys/procfs.h>
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>

struct ptrace_io_desc32 {
	int		piod_op;
	u_int32_t	piod_offs;
	u_int32_t	piod_addr;
	u_int32_t	piod_len;
};
#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_IA32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
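	 *
	 * Each pass of the loop below faults in one page, looks it up in
	 * the map, holds it, and moves at most PAGE_SIZE - page_offset
	 * bytes with uiomove_fromphys() before releasing it.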
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;	/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, fault_flags);
		if (error) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (!error && writing && (out_prot & VM_PROT_EXECUTE))
			vm_sync_icache(map, uva, len);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_IA32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_IA32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_IA32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_IA32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_IA32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				if (td2 != NULL)
					break;	/* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_IA32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (td2->td_proc->p_sysent->sv_flags & SV_ILP32)
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
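	/* The hold is dropped by _PRELE() at the "out" label below. */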
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error)
				break;
		}

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3) {
					td3->td_dbgflags &= ~TDB_SUSPEND;
				}
			}
			/*
			 * Unsuspend all threads.  To keep a thread from
			 * running, use PT_SUSPEND to suspend it before
			 * continuing the process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		} else {
			if (data)
				psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_IA32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_IA32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_IA32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 || data > sizeof(*pl)) {
			error = EINVAL;
			break;
		}
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		if (td2->td_dbgflags & TDB_XSIG)
			pl->pl_event = PL_EVENT_SIGNAL;
		else
			pl->pl_event = 0;
		pl->pl_flags = 0;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
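		/* Reacquire the process lock dropped around the copyout. */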
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
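
/*
 * Illustrative userland sketch (not part of the original file, and kept
 * inside "#if 0" so it is never compiled into the kernel): one way a
 * debugger front end might drive the PT_ATTACH, PT_IO and PT_DETACH
 * requests handled by kern_ptrace() above.  The target pid and remote
 * address taken from argv are placeholders, and error handling is
 * deliberately minimal.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	struct ptrace_io_desc piod;
	pid_t pid;
	int status, word;

	if (argc != 3)
		errx(1, "usage: %s pid addr", argv[0]);
	pid = (pid_t)strtol(argv[1], NULL, 10);

	/* PT_ATTACH stops the target with SIGSTOP; wait for the stop. */
	if (ptrace(PT_ATTACH, pid, NULL, 0) == -1)
		err(1, "PT_ATTACH");
	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");

	/* PT_IO: read one int from the traced process's data space. */
	piod.piod_op = PIOD_READ_D;
	piod.piod_offs = (void *)strtoul(argv[2], NULL, 0);
	piod.piod_addr = &word;
	piod.piod_len = sizeof(word);
	if (ptrace(PT_IO, pid, (caddr_t)&piod, 0) == -1)
		err(1, "PT_IO");
	printf("%p: 0x%x (%zu bytes)\n", piod.piod_offs, word, piod.piod_len);

	/* PT_DETACH: addr of 1 resumes at the current PC, data 0 = no signal. */
	if (ptrace(PT_DETACH, pid, (caddr_t)1, 0) == -1)
		err(1, "PT_DETACH");
	return (0);
}
#endif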