/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
30 * 31 * $FreeBSD$ 32 */ 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/lock.h> 37 #include <sys/mutex.h> 38 #include <sys/sysproto.h> 39 #include <sys/proc.h> 40 #include <sys/vnode.h> 41 #include <sys/ptrace.h> 42 #include <sys/sx.h> 43 #include <sys/user.h> 44 45 #include <machine/reg.h> 46 47 #include <vm/vm.h> 48 #include <vm/vm_param.h> 49 #include <vm/pmap.h> 50 #include <vm/vm_extern.h> 51 #include <vm/vm_map.h> 52 #include <vm/vm_kern.h> 53 #include <vm/vm_object.h> 54 #include <vm/vm_page.h> 55 56 #define PROC_REG_ACTION(name, action, type) \ 57 int \ 58 proc_##name##_##type##s(struct thread *td, struct type *regs) \ 59 { \ 60 int error; \ 61 \ 62 mtx_lock_spin(&sched_lock); \ 63 error = (action##_##type##s(td, regs)); \ 64 mtx_unlock_spin(&sched_lock); \ 65 return (error); \ 66 } 67 68 PROC_REG_ACTION(read, fill, reg); 69 PROC_REG_ACTION(write, set, reg); 70 PROC_REG_ACTION(read, fill, dbreg); 71 PROC_REG_ACTION(write, set, dbreg); 72 PROC_REG_ACTION(read, fill, fpreg); 73 PROC_REG_ACTION(write, set, fpreg); 74 75 int 76 proc_sstep(struct thread *td) 77 { 78 int error; 79 80 mtx_lock_spin(&sched_lock); 81 error = ptrace_single_step(td); 82 mtx_unlock_spin(&sched_lock); 83 return (error); 84 } 85 86 int 87 proc_rwmem(struct proc *p, struct uio *uio) 88 { 89 struct vmspace *vm; 90 vm_map_t map; 91 vm_object_t object = NULL; 92 vm_offset_t pageno = 0; /* page number */ 93 vm_prot_t reqprot; 94 vm_offset_t kva; 95 int error; 96 int writing; 97 98 GIANT_REQUIRED; 99 100 /* 101 * if the vmspace is in the midst of being deallocated or the 102 * process is exiting, don't try to grab anything. The page table 103 * usage in that process can be messed up. 104 */ 105 vm = p->p_vmspace; 106 if ((p->p_flag & P_WEXIT)) 107 return (EFAULT); 108 if (vm->vm_refcnt < 1) 109 return (EFAULT); 110 ++vm->vm_refcnt; 111 /* 112 * The map we want... 113 */ 114 map = &vm->vm_map; 115 116 writing = uio->uio_rw == UIO_WRITE; 117 reqprot = writing ? 
(VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) : 118 VM_PROT_READ; 119 120 kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE); 121 122 /* 123 * Only map in one page at a time. We don't have to, but it 124 * makes things easier. This way is trivial - right? 125 */ 126 do { 127 vm_map_t tmap; 128 vm_offset_t uva; 129 int page_offset; /* offset into page */ 130 vm_map_entry_t out_entry; 131 vm_prot_t out_prot; 132 boolean_t wired; 133 vm_pindex_t pindex; 134 u_int len; 135 vm_page_t m; 136 137 object = NULL; 138 139 uva = (vm_offset_t)uio->uio_offset; 140 141 /* 142 * Get the page number of this segment. 143 */ 144 pageno = trunc_page(uva); 145 page_offset = uva - pageno; 146 147 /* 148 * How many bytes to copy 149 */ 150 len = min(PAGE_SIZE - page_offset, uio->uio_resid); 151 152 /* 153 * Fault the page on behalf of the process 154 */ 155 error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL); 156 if (error) { 157 error = EFAULT; 158 break; 159 } 160 161 /* 162 * Now we need to get the page. out_entry, out_prot, wired, 163 * and single_use aren't used. One would think the vm code 164 * would be a *bit* nicer... We use tmap because 165 * vm_map_lookup() can change the map argument. 166 */ 167 tmap = map; 168 error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry, 169 &object, &pindex, &out_prot, &wired); 170 171 if (error) { 172 error = EFAULT; 173 174 /* 175 * Make sure that there is no residue in 'object' from 176 * an error return on vm_map_lookup. 177 */ 178 object = NULL; 179 180 break; 181 } 182 183 m = vm_page_lookup(object, pindex); 184 185 /* Allow fallback to backing objects if we are reading */ 186 187 while (m == NULL && !writing && object->backing_object) { 188 189 pindex += OFF_TO_IDX(object->backing_object_offset); 190 object = object->backing_object; 191 192 m = vm_page_lookup(object, pindex); 193 } 194 195 if (m == NULL) { 196 error = EFAULT; 197 198 /* 199 * Make sure that there is no residue in 'object' from 200 * an error return on vm_map_lookup. 
201 */ 202 object = NULL; 203 204 vm_map_lookup_done(tmap, out_entry); 205 206 break; 207 } 208 209 /* 210 * Wire the page into memory 211 */ 212 vm_page_wire(m); 213 214 /* 215 * We're done with tmap now. 216 * But reference the object first, so that we won't loose 217 * it. 218 */ 219 vm_object_reference(object); 220 vm_map_lookup_done(tmap, out_entry); 221 222 pmap_kenter(kva, VM_PAGE_TO_PHYS(m)); 223 224 /* 225 * Now do the i/o move. 226 */ 227 error = uiomove((caddr_t)(kva + page_offset), len, uio); 228 229 pmap_kremove(kva); 230 231 /* 232 * release the page and the object 233 */ 234 vm_page_unwire(m, 1); 235 vm_object_deallocate(object); 236 237 object = NULL; 238 239 } while (error == 0 && uio->uio_resid > 0); 240 241 if (object) 242 vm_object_deallocate(object); 243 244 kmem_free(kernel_map, kva, PAGE_SIZE); 245 vmspace_free(vm); 246 return (error); 247 } 248 249 /* 250 * Process debugging system call. 251 */ 252 #ifndef _SYS_SYSPROTO_H_ 253 struct ptrace_args { 254 int req; 255 pid_t pid; 256 caddr_t addr; 257 int data; 258 }; 259 #endif 260 261 int 262 ptrace(struct thread *td, struct ptrace_args *uap) 263 { 264 struct proc *curp = td->td_proc; 265 struct proc *p; 266 struct thread *td2; 267 struct iovec iov; 268 struct uio uio; 269 union { 270 struct reg reg; 271 struct dbreg dbreg; 272 struct fpreg fpreg; 273 } r; 274 int error = 0; 275 int write; 276 277 write = 0; 278 if (uap->req == PT_TRACE_ME) { 279 p = curp; 280 PROC_LOCK(p); 281 } else { 282 if ((p = pfind(uap->pid)) == NULL) 283 return (ESRCH); 284 } 285 if (p_cansee(curp, p)) { 286 PROC_UNLOCK(p); 287 return (ESRCH); 288 } 289 290 if ((error = p_candebug(curp, p)) != 0) { 291 PROC_UNLOCK(p); 292 return (error); 293 } 294 295 /* 296 * Don't debug system processes! 297 */ 298 if ((p->p_flag & P_SYSTEM) != 0) { 299 PROC_UNLOCK(p); 300 return (EINVAL); 301 } 302 303 /* 304 * Permissions check 305 */ 306 switch (uap->req) { 307 case PT_TRACE_ME: 308 /* Always legal. 
*/ 309 break; 310 311 case PT_ATTACH: 312 /* Self */ 313 if (p->p_pid == curp->p_pid) { 314 PROC_UNLOCK(p); 315 return (EINVAL); 316 } 317 318 /* Already traced */ 319 if (p->p_flag & P_TRACED) { 320 PROC_UNLOCK(p); 321 return (EBUSY); 322 } 323 324 /* OK */ 325 break; 326 327 case PT_READ_I: 328 case PT_READ_D: 329 case PT_WRITE_I: 330 case PT_WRITE_D: 331 case PT_CONTINUE: 332 case PT_KILL: 333 case PT_STEP: 334 case PT_DETACH: 335 #ifdef PT_GETREGS 336 case PT_GETREGS: 337 #endif 338 #ifdef PT_SETREGS 339 case PT_SETREGS: 340 #endif 341 #ifdef PT_GETFPREGS 342 case PT_GETFPREGS: 343 #endif 344 #ifdef PT_SETFPREGS 345 case PT_SETFPREGS: 346 #endif 347 #ifdef PT_GETDBREGS 348 case PT_GETDBREGS: 349 #endif 350 #ifdef PT_SETDBREGS 351 case PT_SETDBREGS: 352 #endif 353 /* not being traced... */ 354 if ((p->p_flag & P_TRACED) == 0) { 355 PROC_UNLOCK(p); 356 return (EPERM); 357 } 358 359 /* not being traced by YOU */ 360 if (p->p_pptr != curp) { 361 PROC_UNLOCK(p); 362 return (EBUSY); 363 } 364 365 /* not currently stopped */ 366 mtx_lock_spin(&sched_lock); 367 if (p->p_stat != SSTOP || (p->p_flag & P_WAITED) == 0) { 368 mtx_unlock_spin(&sched_lock); 369 PROC_UNLOCK(p); 370 return (EBUSY); 371 } 372 mtx_unlock_spin(&sched_lock); 373 374 /* OK */ 375 break; 376 377 default: 378 PROC_UNLOCK(p); 379 return (EINVAL); 380 } 381 382 td2 = FIRST_THREAD_IN_PROC(p); 383 PROC_UNLOCK(p); 384 #ifdef FIX_SSTEP 385 /* 386 * Single step fixup ala procfs 387 */ 388 FIX_SSTEP(td2); /* XXXKSE */ 389 #endif 390 391 /* 392 * Actually do the requests 393 */ 394 395 td->td_retval[0] = 0; 396 397 switch (uap->req) { 398 case PT_TRACE_ME: 399 /* set my trace flag and "owner" so it can read/write me */ 400 sx_xlock(&proctree_lock); 401 PROC_LOCK(p); 402 p->p_flag |= P_TRACED; 403 p->p_oppid = p->p_pptr->p_pid; 404 PROC_UNLOCK(p); 405 sx_xunlock(&proctree_lock); 406 return (0); 407 408 case PT_ATTACH: 409 /* security check done above */ 410 sx_xlock(&proctree_lock); 411 PROC_LOCK(p); 412 
p->p_flag |= P_TRACED; 413 p->p_oppid = p->p_pptr->p_pid; 414 if (p->p_pptr != curp) 415 proc_reparent(p, curp); 416 PROC_UNLOCK(p); 417 sx_xunlock(&proctree_lock); 418 uap->data = SIGSTOP; 419 goto sendsig; /* in PT_CONTINUE below */ 420 421 case PT_STEP: 422 case PT_CONTINUE: 423 case PT_DETACH: 424 if ((uap->req != PT_STEP) && ((unsigned)uap->data >= NSIG)) 425 return (EINVAL); 426 427 PHOLD(p); 428 429 if (uap->req == PT_STEP) { 430 error = ptrace_single_step(td2); 431 if (error) { 432 PRELE(p); 433 return (error); 434 } 435 } 436 437 if (uap->addr != (caddr_t)1) { 438 fill_kinfo_proc(p, &p->p_uarea->u_kproc); 439 error = ptrace_set_pc(td2, 440 (u_long)(uintfptr_t)uap->addr); 441 if (error) { 442 PRELE(p); 443 return (error); 444 } 445 } 446 PRELE(p); 447 448 if (uap->req == PT_DETACH) { 449 /* reset process parent */ 450 sx_xlock(&proctree_lock); 451 if (p->p_oppid != p->p_pptr->p_pid) { 452 struct proc *pp; 453 454 pp = pfind(p->p_oppid); 455 if (pp != NULL) 456 PROC_UNLOCK(pp); 457 else 458 pp = initproc; 459 PROC_LOCK(p); 460 proc_reparent(p, pp); 461 } else 462 PROC_LOCK(p); 463 p->p_flag &= ~(P_TRACED | P_WAITED); 464 p->p_oppid = 0; 465 466 PROC_UNLOCK(p); 467 sx_xunlock(&proctree_lock); 468 469 /* should we send SIGCHLD? */ 470 471 } 472 473 sendsig: 474 /* deliver or queue signal */ 475 PROC_LOCK(p); 476 mtx_lock_spin(&sched_lock); 477 if (p->p_stat == SSTOP) { 478 p->p_xstat = uap->data; 479 setrunnable(td2); /* XXXKSE */ 480 mtx_unlock_spin(&sched_lock); 481 } else { 482 mtx_unlock_spin(&sched_lock); 483 if (uap->data) 484 psignal(p, uap->data); 485 486 } 487 PROC_UNLOCK(p); 488 return (0); 489 490 case PT_WRITE_I: 491 case PT_WRITE_D: 492 write = 1; 493 /* fallthrough */ 494 case PT_READ_I: 495 case PT_READ_D: 496 /* write = 0 set above */ 497 iov.iov_base = write ? 
(caddr_t)&uap->data : 498 (caddr_t)td->td_retval; 499 iov.iov_len = sizeof(int); 500 uio.uio_iov = &iov; 501 uio.uio_iovcnt = 1; 502 uio.uio_offset = (off_t)(uintptr_t)uap->addr; 503 uio.uio_resid = sizeof(int); 504 uio.uio_segflg = UIO_SYSSPACE; /* ie: the uap */ 505 uio.uio_rw = write ? UIO_WRITE : UIO_READ; 506 uio.uio_td = td; 507 error = proc_rwmem(p, &uio); 508 if (uio.uio_resid != 0) { 509 /* 510 * XXX proc_rwmem() doesn't currently return ENOSPC, 511 * so I think write() can bogusly return 0. 512 * XXX what happens for short writes? We don't want 513 * to write partial data. 514 * XXX proc_rwmem() returns EPERM for other invalid 515 * addresses. Convert this to EINVAL. Does this 516 * clobber returns of EPERM for other reasons? 517 */ 518 if (error == 0 || error == ENOSPC || error == EPERM) 519 error = EINVAL; /* EOF */ 520 } 521 return (error); 522 523 case PT_KILL: 524 uap->data = SIGKILL; 525 goto sendsig; /* in PT_CONTINUE above */ 526 527 #ifdef PT_SETREGS 528 case PT_SETREGS: 529 error = copyin(uap->addr, &r.reg, sizeof r.reg); 530 if (error == 0) { 531 PHOLD(p); 532 error = proc_write_regs(td2, &r.reg); 533 PRELE(p); 534 } 535 return (error); 536 #endif /* PT_SETREGS */ 537 538 #ifdef PT_GETREGS 539 case PT_GETREGS: 540 PHOLD(p); 541 error = proc_read_regs(td2, &r.reg); 542 PRELE(p); 543 if (error == 0) 544 error = copyout(&r.reg, uap->addr, sizeof r.reg); 545 return (error); 546 #endif /* PT_SETREGS */ 547 548 #ifdef PT_SETFPREGS 549 case PT_SETFPREGS: 550 error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg); 551 if (error == 0) { 552 PHOLD(p); 553 error = proc_write_fpregs(td2, &r.fpreg); 554 PRELE(p); 555 } 556 return (error); 557 #endif /* PT_SETFPREGS */ 558 559 #ifdef PT_GETFPREGS 560 case PT_GETFPREGS: 561 PHOLD(p); 562 error = proc_read_fpregs(td2, &r.fpreg); 563 PRELE(p); 564 if (error == 0) 565 error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg); 566 return (error); 567 #endif /* PT_SETFPREGS */ 568 569 #ifdef PT_SETDBREGS 570 case 
PT_SETDBREGS: 571 error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg); 572 if (error == 0) { 573 PHOLD(p); 574 error = proc_write_dbregs(td2, &r.dbreg); 575 PRELE(p); 576 } 577 return (error); 578 #endif /* PT_SETDBREGS */ 579 580 #ifdef PT_GETDBREGS 581 case PT_GETDBREGS: 582 PHOLD(p); 583 error = proc_read_dbregs(td2, &r.dbreg); 584 PRELE(p); 585 if (error == 0) 586 error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg); 587 return (error); 588 #endif /* PT_SETDBREGS */ 589 590 default: 591 KASSERT(0, ("unreachable code\n")); 592 break; 593 } 594 595 KASSERT(0, ("unreachable code\n")); 596 return (0); 597 } 598 599 int 600 trace_req(struct proc *p) 601 { 602 return (1); 603 } 604 605 /* 606 * stopevent() 607 * Stop a process because of a debugging event; 608 * stay stopped until p->p_step is cleared 609 * (cleared by PIOCCONT in procfs). 610 * 611 * Must be called with the proc struct mutex held. 612 */ 613 614 void 615 stopevent(struct proc *p, unsigned int event, unsigned int val) 616 { 617 618 PROC_LOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED); 619 p->p_step = 1; 620 621 do { 622 p->p_xstat = val; 623 p->p_stype = event; /* Which event caused the stop? */ 624 wakeup(&p->p_stype); /* Wake up any PIOCWAIT'ing procs */ 625 msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0); 626 } while (p->p_step); 627 } 628