1 /* 2 * Copyright (c) 1994, Sean Eric Fagan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Sean Eric Fagan. 16 * 4. The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/syscallsubr.h> 40 #include <sys/sysproto.h> 41 #include <sys/proc.h> 42 #include <sys/vnode.h> 43 #include <sys/ptrace.h> 44 #include <sys/sx.h> 45 #include <sys/user.h> 46 47 #include <machine/reg.h> 48 49 #include <vm/vm.h> 50 #include <vm/pmap.h> 51 #include <vm/vm_extern.h> 52 #include <vm/vm_map.h> 53 #include <vm/vm_kern.h> 54 #include <vm/vm_object.h> 55 #include <vm/vm_page.h> 56 57 /* 58 * Functions implemented using PROC_ACTION(): 59 * 60 * proc_read_regs(proc, regs) 61 * Get the current user-visible register set from the process 62 * and copy it into the regs structure (<machine/reg.h>). 63 * The process is stopped at the time read_regs is called. 64 * 65 * proc_write_regs(proc, regs) 66 * Update the current register set from the passed in regs 67 * structure. Take care to avoid clobbering special CPU 68 * registers or privileged bits in the PSL. 69 * Depending on the architecture this may have fix-up work to do, 70 * especially if the IAR or PCW are modified. 71 * The process is stopped at the time write_regs is called. 72 * 73 * proc_read_fpregs, proc_write_fpregs 74 * deal with the floating point register set, otherwise as above. 75 * 76 * proc_read_dbregs, proc_write_dbregs 77 * deal with the processor debug register set, otherwise as above. 78 * 79 * proc_sstep(proc) 80 * Arrange for the process to trap after executing a single instruction. 
 */

/*
 * Run "action" against the (already locked) target process, but only if the
 * process image is resident in memory; a swapped-out process has no valid
 * register/pcb state to read or write, so report EIO instead.
 * Expands to a return statement, so it must be the body of each wrapper.
 */
#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)

/* Copy td's user-visible registers into *regs (see comment block above). */
int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

/* Install *regs as td's register set. */
int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

/* Copy td's debug registers into *dbregs. */
int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

/* Install *dbregs as td's debug register set. */
int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

/* Arrange for td to trap after executing a single instruction. */
int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

/*
 * Transfer data between the address space of process p and the uio
 * description (direction given by uio->uio_rw).  Used for PT_READ_*/
 * PT_WRITE_* and PT_IO, and takes its own transient reference on the
 * target vmspace so the page tables cannot go away mid-transfer.
 * Returns 0 or EFAULT; a short transfer leaves uio->uio_resid non-zero.
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	int error, writing;

	mtx_lock(&Giant);
	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT)) {
		mtx_unlock(&Giant);
		return (EFAULT);
	}
	if (vm->vm_refcnt < 1) {
		mtx_unlock(&Giant);
		return (EFAULT);
	}
	/* Take our own reference; released via vmspace_free() below. */
	++vm->vm_refcnt;
	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	/*
	 * VM_PROT_OVERRIDE_WRITE lets the debugger write pages the target
	 * maps read-only (e.g. planting breakpoints in text).
	 */
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy (bounded by the end of this page).
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 * (Writers must hit the top-level object, which the
			 * vm_fault() above should have populated.)
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory so it stays resident across the
		 * uiomove below (hold, not wire: short-term pin).
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	vmspace_free(vm);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

/*
 * MPSAFE
 *
 * Syscall entry point: marshals the user-supplied buffer for the requests
 * that pass structures by address (register sets, PT_IO descriptors) into a
 * kernel-side union, calls kern_ptrace(), and copies results back out.
 * For all other requests uap->addr is passed through untouched.
 */
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
		/* Output-only requests: nothing to copy in. */
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		/*
		 * NOTE(review): the copyout result is deliberately discarded
		 * here, so a fault while writing back the updated piod_len is
		 * silently ignored while the GETREGS-family below propagates
		 * copyout errors — confirm this asymmetry is intended.
		 */
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	}

	return (error);
}

/*
 * Work-horse for ptrace(2): validates the caller's right to debug the
 * target, then performs the request.  "addr" points at kernel memory for
 * the register/PT_IO requests (already copied in by ptrace() above) and at
 * a user address otherwise.  Callers may pass a thread id (> PID_MAX) as
 * "pid" to address a specific thread within a process.
 *
 * Locking: takes proctree_lock exclusively for requests that can reparent
 * or need a stable process tree, and holds the target's proc lock across
 * the permission checks; each request arm is responsible for dropping it.
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod;
	int error, write, tmp;
	int proctree_locked = 0;
	lwpid_t tid = 0;

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			/* pfind() returns the process locked on success. */
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/*
			 * pid > PID_MAX names a thread id: scan every
			 * process for a matching thread.
			 * this is slow, should be optimized
			 */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				mtx_lock_spin(&sched_lock);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				mtx_unlock_spin(&sched_lock);
				if (td2 != NULL)
					break;	/* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	/* No explicit thread named: operate on the first thread. */
	if (tid == 0) {
		td2 = FIRST_THREAD_IN_PROC(p);
		tid = td2->td_tid;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);			/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		/* Remember the original parent so PT_DETACH can restore it. */
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		/* Hold the process so it can't exit while we drop its lock. */
		_PHOLD(p);

		switch (req) {
		case PT_STEP:
			PROC_UNLOCK(p);
			error = ptrace_single_step(td2);
			if (error) {
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			/*
			 * NOTE(review): PT_SYSCALL is not in the outer case
			 * list above, so this arm looks unreachable — confirm
			 * whether PT_SYSCALL should also be routed here.
			 */
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		/*
		 * addr == (void *)1 is the ptrace(2) convention for
		 * "resume at the current PC"; anything else sets the PC.
		 */
		if (addr != (void *)1) {
			PROC_UNLOCK(p);
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error) {
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
		}
		_PRELE(p);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
		if (P_SHOULDSTOP(p)) {
			/* Stopped: record the signal and wake it directly. */
			p->p_xstat = data;
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			setrunnable(td2);	/* XXXKSE */
			/* Need foreach kse in proc, ... make_kse_queued(). */
			mtx_unlock_spin(&sched_lock);
		} else if (data)
			psignal(p, data);
		PROC_UNLOCK(p);

		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		return (error);

	case PT_IO:
		PROC_UNLOCK(p);
		/* addr points at the kernel copy made by ptrace() above. */
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		error = proc_rwmem(p, &uio);
		/* Report the number of bytes actually transferred. */
		piod->piod_len -= uio.uio_resid;
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = proc_write_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = proc_read_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = proc_write_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = proc_read_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = proc_write_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = proc_read_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		/* Hand machine-dependent requests to the MD layer. */
		if (req >= PT_FIRSTMACH) {
			_PHOLD(p);
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PRELE(p);
			return (error);
		}
#endif
		break;
	}

	/* Unknown request. */
	error = EINVAL;

fail:
	PROC_UNLOCK(p);
fail_noproc:
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		/* msleep drops and retakes p's lock around the sleep. */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}