/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef COMPAT_IA32
#include <sys/procfs.h>
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>

struct ptrace_io_desc32 {
	int		piod_op;
	u_int32_t	piod_offs;
	u_int32_t	piod_addr;
	u_int32_t	piod_len;
};
#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {					\
	int error;							\
									\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);			\
	if ((td->td_proc->p_flag & P_INMEM) == 0)			\
		error = EIO;						\
	else								\
		error = (action);					\
	return (error);							\
} while(0)
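
/*
 * Illustrative sketch (not part of the build): once PROC_ACTION() is
 * expanded, each of the accessors below amounts to, e.g. for
 * proc_read_regs(),
 *
 *	int error;
 *
 *	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 *	if ((td->td_proc->p_flag & P_INMEM) == 0)
 *		error = EIO;
 *	else
 *		error = fill_regs(td, regs);
 *	return (error);
 *
 * i.e. every accessor requires the proc lock to be held and fails with
 * EIO if the target is not resident in memory.
 */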

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_IA32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_object_t backing_object, object;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;

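	/*
	 * Explanatory note: a write asks for VM_PROT_COPY in addition to
	 * VM_PROT_READ so that the fault below can give us a private,
	 * copy-on-write copy of the page (e.g. a debugger writing a
	 * breakpoint into read-only text) rather than modifying the
	 * underlying backing object.
	 */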
	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry and wired
		 * aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		if (writing && m != NULL) {
			vm_page_dirty(m);
			vm_pager_page_unswapped(m);
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (!error && writing && (out_prot & VM_PROT_EXECUTE))
			vm_sync_icache(map, uva, len);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_IA32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
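
/*
 * Token-pasting detail behind the macros above: "## 32" appends to the
 * last token of the argument, so with wrap32 set "&r.reg" becomes
 * "&r.reg32" and "sizeof r.reg" becomes "sizeof r.reg32".  Callers
 * therefore have to pass members of the union "r" below that also exist
 * in a 32-bit flavour.
 */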
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_IA32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_IA32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_IA32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_IA32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
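	/*
	 * The exclusive proctree_lock is taken up front for the requests
	 * that may change the parent/tracing relationship (PT_TRACE_ME,
	 * PT_ATTACH, PT_DETACH) and for the rest of the step/continue
	 * family, which shares the detach/sendsig code path below; that
	 * path drops the lock again before the target is resumed.
	 */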
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				if (td2 != NULL)
					break;	/* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_IA32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (td2->td_proc->p_sysent->sv_flags & SV_ILP32)
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

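	/*
	 * The hold taken below (_PHOLD() bumps p->p_lock) is also what
	 * satisfies the "process not held" assertion in proc_rwmem() for
	 * the PT_READ/PT_WRITE and PT_IO requests handled further down.
	 */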
	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup a la procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error)
				break;
		}

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3) {
					td3->td_dbgflags &= ~TDB_SUSPEND;
				}
			}
			/*
			 * Unsuspend all threads.  To keep a particular
			 * thread from running, suspend it with PT_SUSPEND
			 * before continuing the process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		} else {
			if (data)
				psignal(p, data);
		}
		break;
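
	/*
	 * PT_READ_I/D and PT_WRITE_I/D move a single int at address "addr"
	 * in the traced process, staged through a local int and a
	 * UIO_SYSSPACE uio that proc_rwmem() consumes.
	 */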
	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_IA32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_IA32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_IA32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 || data > sizeof(*pl)) {
			error = EINVAL;
			break;
		}
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		if (td2->td_dbgflags & TDB_XSIG)
			pl->pl_event = PL_EVENT_SIGNAL;
		else
			pl->pl_event = 0;
		pl->pl_flags = 0;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

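	/*
	 * PT_GETLWPLIST copies out up to "data" thread IDs; the proc lock
	 * is dropped around malloc(M_WAITOK) and copyout() since both may
	 * sleep.
	 */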
	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}