1 /* 2 * Copyright (c) 1994, Sean Eric Fagan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Sean Eric Fagan. 16 * 4. The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/user.h>

#include <machine/reg.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Generate the proc_{read,write}_{reg,dbreg,fpreg}s() wrappers used by
 * ptrace() below.  Each wrapper simply calls the machine-dependent
 * fill_*regs()/set_*regs() routine for the thread while holding the
 * sched_lock spin mutex, so the thread's register state cannot change
 * underneath the copy.
 */
#define PROC_REG_ACTION(name, action, type) \
int \
proc_##name##_##type##s(struct thread *td, struct type *regs) \
{ \
	int error; \
\
	mtx_lock_spin(&sched_lock); \
	error = (action##_##type##s(td, regs)); \
	mtx_unlock_spin(&sched_lock); \
	return (error); \
}

PROC_REG_ACTION(read, fill, reg);
PROC_REG_ACTION(write, set, reg);
PROC_REG_ACTION(read, fill, dbreg);
PROC_REG_ACTION(write, set, dbreg);
PROC_REG_ACTION(read, fill, fpreg);
PROC_REG_ACTION(write, set, fpreg);

/*
 * Arrange for the thread to execute a single instruction and stop, by
 * calling the machine-dependent ptrace_single_step() under sched_lock.
 * Returns 0 or an MD error number.
 */
int
proc_sstep(struct thread *td)
{
	int error;

	mtx_lock_spin(&sched_lock);
	error = ptrace_single_step(td);
	mtx_unlock_spin(&sched_lock);
	return (error);
}

/*
 * Transfer data between the address space of process 'p' and the buffer
 * described by 'uio' (direction taken from uio->uio_rw).  One page at a
 * time is faulted in, looked up, wired, and temporarily mapped at a
 * kernel VA for uiomove().  Returns 0 on success, EFAULT if the target
 * address is not mapped (or the vmspace is going away), or an error
 * from uiomove().  Caller must hold Giant (GIANT_REQUIRED below).
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;
	int error;
	int writing;

	GIANT_REQUIRED;

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT))
		return (EFAULT);
	if (vm->vm_refcnt < 1)
		return (EFAULT);
	/* Hold a reference so the vmspace cannot be freed under us;
	 * released via vmspace_free() at the bottom. */
	++vm->vm_refcnt;
	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	/*
	 * For writes, request VM_PROT_OVERRIDE_WRITE as well so the
	 * debugger can modify pages whose mapping is read-only (e.g.
	 * program text) — presumably forcing a COW fault; confirm against
	 * vm_fault() semantics.
	 */
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	/* Scratch kernel VA used to map each target page in turn. */
	kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;	/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);

		if (error) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			break;
		}

		m = vm_page_lookup(object, pindex);

		/* Allow fallback to backing objects if we are reading */

		while (m == NULL && !writing && object->backing_object) {

			pindex += OFF_TO_IDX(object->backing_object_offset);
			object = object->backing_object;

			m = vm_page_lookup(object, pindex);
		}

		if (m == NULL) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			vm_map_lookup_done(tmap, out_entry);

			break;
		}

		/*
		 * Wire the page into memory
		 */
		vm_page_wire(m);

		/*
		 * We're done with tmap now.
		 * But reference the object first, so that we won't lose
		 * it.
		 */
		vm_object_reference(object);
		vm_map_lookup_done(tmap, out_entry);

		/* Map the target page at the scratch kernel VA. */
		pmap_kenter(kva, VM_PAGE_TO_PHYS(m));

		/*
		 * Now do the i/o move.
		 */
		error = uiomove((caddr_t)(kva + page_offset), len, uio);

		pmap_kremove(kva);

		/*
		 * release the page and the object
		 */
		vm_page_unwire(m, 1);
		vm_object_deallocate(object);

		object = NULL;

	} while (error == 0 && uio->uio_resid > 0);

	/* Drop the object reference if we broke out mid-iteration. */
	if (object)
		vm_object_deallocate(object);

	kmem_free(kernel_map, kva, PAGE_SIZE);
	vmspace_free(vm);
	return (error);
}

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;		/* PT_* request code */
	pid_t	pid;		/* target process */
	caddr_t	addr;		/* request-specific address argument */
	int	data;		/* request-specific data (often a signal) */
};
#endif

/*
 * ptrace(2): process tracing and debugging.
 *
 * The first half validates permissions for uap->req against the target
 * process (visibility, p_candebug(), no system processes, and for most
 * requests that the target is traced by the caller and stopped); the
 * second half performs the request.  Returns 0 or an errno; PT_READ_*
 * results are delivered via td->td_retval[0].
 */
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	struct proc *curp = td->td_proc;
	struct proc *p;
	struct iovec iov;
	struct uio uio;
	/* Scratch space for register-set copyin/copyout. */
	union {
		struct reg reg;
		struct dbreg dbreg;
		struct fpreg fpreg;
	} r;
	int error = 0;
	int write;

	write = 0;
	if (uap->req == PT_TRACE_ME) {
		p = curp;
		PROC_LOCK(p);
	} else {
		/* pfind() returns the process locked (see PROC_UNLOCKs). */
		if ((p = pfind(uap->pid)) == NULL)
			return (ESRCH);
	}
	/* Hide the target entirely if the caller may not even see it. */
	if (p_cansee(curp, p)) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}

	if ((error = p_candebug(curp, p)) != 0) {
		PROC_UNLOCK(p);
		return (error);
	}

	/*
	 * Don't debug system processes!
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}

	/*
	 * Permissions check
	 */
	switch (uap->req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			PROC_UNLOCK(p);
			return (EINVAL);
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			PROC_UNLOCK(p);
			return (EBUSY);
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			PROC_UNLOCK(p);
			return (EPERM);
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			PROC_UNLOCK(p);
			return (EBUSY);
		}

		/* not currently stopped */
		mtx_lock_spin(&sched_lock);
		if (p->p_stat != SSTOP || (p->p_flag & P_WAITED) == 0) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (EBUSY);
		}
		mtx_unlock_spin(&sched_lock);

		/* OK */
		break;

	default:
		PROC_UNLOCK(p);
		return (EINVAL);
	}

	PROC_UNLOCK(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(&p->p_thread);	/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (uap->req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		sx_xlock(&proctree_lock);
		PROC_LOCK(p);
		p->p_flag |= P_TRACED;
		/* Remember the original parent for restoration on detach. */
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		sx_xlock(&proctree_lock);
		PROC_LOCK(p);
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != curp)
			proc_reparent(p, curp);
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		/* Attach stops the target by delivering SIGSTOP. */
		uap->data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* data is the signal to deliver; validate it (0 == none). */
		if ((uap->req != PT_STEP) && ((unsigned)uap->data >= NSIG))
			return (EINVAL);

		PHOLD(p);

		if (uap->req == PT_STEP) {
			if ((error = ptrace_single_step(&p->p_thread))) {
				PRELE(p);
				return (error);
			}
		}

		/*
		 * addr of (caddr_t)1 conventionally means "resume at the
		 * current PC"; anything else sets a new PC.
		 */
		if (uap->addr != (caddr_t)1) {
			fill_kinfo_proc(p, &p->p_uarea->u_kproc);
			if ((error = ptrace_set_pc(&p->p_thread,
			    (u_long)(uintfptr_t)uap->addr))) {
				PRELE(p);
				return (error);
			}
		}
		PRELE(p);

		if (uap->req == PT_DETACH) {
			/* reset process parent */
			sx_xlock(&proctree_lock);
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				/*
				 * pfind() returns pp locked; we only need
				 * the pointer here.  Fall back to init if
				 * the original parent is gone.
				 */
				pp = pfind(p->p_oppid);
				if (pp != NULL)
					PROC_UNLOCK(pp);
				else
					pp = initproc;
				PROC_LOCK(p);
				proc_reparent(p, pp);
			} else
				PROC_LOCK(p);
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			PROC_UNLOCK(p);
			sx_xunlock(&proctree_lock);

			/* should we send SIGCHLD? */

		}

	sendsig:
		/* deliver or queue signal */
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (p->p_stat == SSTOP) {
			/* Stopped: stash the signal and just resume it. */
			p->p_xstat = uap->data;
			setrunnable(&p->p_thread);	/* XXXKSE */
			mtx_unlock_spin(&sched_lock);
		} else {
			mtx_unlock_spin(&sched_lock);
			if (uap->data)
				psignal(p, uap->data);

		}
		PROC_UNLOCK(p);
		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/* write = 0 set above */
		/*
		 * Build a one-word kernel uio: writes source uap->data,
		 * reads land in td->td_retval (the syscall return slot).
		 */
		iov.iov_base = write ? (caddr_t)&uap->data :
		    (caddr_t)td->td_retval;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)uap->addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* ie: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		return (error);

	case PT_KILL:
		/* Kill is just "continue with SIGKILL". */
		uap->data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		if (error == 0) {
			PHOLD(p);
			error = proc_write_regs(&p->p_thread, &r.reg);
			PRELE(p);
		}
		return (error);
#endif /* PT_SETREGS */

#ifdef PT_GETREGS
	case PT_GETREGS:
		PHOLD(p);
		error = proc_read_regs(&p->p_thread, &r.reg);
		PRELE(p);
		if (error == 0)
			error = copyout(&r.reg, uap->addr, sizeof r.reg);
		return (error);
#endif /* PT_SETREGS */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		if (error == 0) {
			PHOLD(p);
			error = proc_write_fpregs(&p->p_thread, &r.fpreg);
			PRELE(p);
		}
		return (error);
#endif /* PT_SETFPREGS */

#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		PHOLD(p);
		error = proc_read_fpregs(&p->p_thread, &r.fpreg);
		PRELE(p);
		if (error == 0)
			error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		return (error);
#endif /* PT_SETFPREGS */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		if (error == 0) {
			PHOLD(p);
			error = proc_write_dbregs(&p->p_thread, &r.dbreg);
			PRELE(p);
		}
		return (error);
#endif /* PT_SETDBREGS */

#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		PHOLD(p);
		error = proc_read_dbregs(&p->p_thread, &r.dbreg);
		PRELE(p);
		if (error == 0)
			error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		return (error);
#endif /* PT_SETDBREGS */

	default:
		/* Every valid request was handled and returned above. */
		KASSERT(0, ("unreachable code\n"));
		break;
	}

	KASSERT(0, ("unreachable code\n"));
	return (0);
}

/*
 * Always grant trace requests for 'p'; kept as a hook for policy.
 */
int
trace_req(struct proc *p)
{
	return (1);
}

/*
 * stopevent()
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 *
 * Must be called with the proc struct mutex held.
 */

void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED);
	p->p_step = 1;

	do {
		p->p_xstat = val;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		/* msleep drops/retakes p_mtx; recheck p_step on wakeup. */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}