xref: /freebsd/sys/kern/sys_process.c (revision 6fd05b64b5b65dd4ba9b86482a0634a5f0b96c29)
1 /*
2  * Copyright (c) 1994, Sean Eric Fagan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Sean Eric Fagan.
16  * 4. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/syscallsubr.h>
40 #include <sys/sysproto.h>
41 #include <sys/proc.h>
42 #include <sys/vnode.h>
43 #include <sys/ptrace.h>
44 #include <sys/sx.h>
45 #include <sys/user.h>
46 #include <sys/malloc.h>
47 
48 #include <machine/reg.h>
49 
50 #include <vm/vm.h>
51 #include <vm/pmap.h>
52 #include <vm/vm_extern.h>
53 #include <vm/vm_map.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_object.h>
56 #include <vm/vm_page.h>
57 
58 /*
59  * Functions implemented using PROC_ACTION():
60  *
61  * proc_read_regs(proc, regs)
62  *	Get the current user-visible register set from the process
63  *	and copy it into the regs structure (<machine/reg.h>).
64  *	The process is stopped at the time read_regs is called.
65  *
66  * proc_write_regs(proc, regs)
67  *	Update the current register set from the passed in regs
68  *	structure.  Take care to avoid clobbering special CPU
69  *	registers or privileged bits in the PSL.
70  *	Depending on the architecture this may have fix-up work to do,
71  *	especially if the IAR or PCW are modified.
72  *	The process is stopped at the time write_regs is called.
73  *
74  * proc_read_fpregs, proc_write_fpregs
75  *	deal with the floating point register set, otherwise as above.
76  *
77  * proc_read_dbregs, proc_write_dbregs
78  *	deal with the processor debug register set, otherwise as above.
79  *
80  * proc_sstep(proc)
81  *	Arrange for the process to trap after executing a single instruction.
82  */
83 
84 #define	PROC_ACTION(action) do {					\
85 	int error;							\
86 									\
87 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);			\
88 	if ((td->td_proc->p_sflag & PS_INMEM) == 0)			\
89 		error = EIO;						\
90 	else								\
91 		error = (action);					\
92 	return (error);							\
93 } while(0)
94 
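/*
 * Illustrative sketch (not compiled in): with PROC_ACTION() expanded,
 * proc_read_regs() below is equivalent to the following.  The caller must
 * hold the proc lock, and EIO is returned if the process is swapped out
 * (PS_INMEM clear); fill_regs() is the machine-dependent backend.
 */
#if 0
int
proc_read_regs(struct thread *td, struct reg *regs)
{
	int error;

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)
		error = EIO;
	else
		error = fill_regs(td, regs);
	return (error);
}
#endif
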
95 int
96 proc_read_regs(struct thread *td, struct reg *regs)
97 {
98 
99 	PROC_ACTION(fill_regs(td, regs));
100 }
101 
102 int
103 proc_write_regs(struct thread *td, struct reg *regs)
104 {
105 
106 	PROC_ACTION(set_regs(td, regs));
107 }
108 
109 int
110 proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
111 {
112 
113 	PROC_ACTION(fill_dbregs(td, dbregs));
114 }
115 
116 int
117 proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
118 {
119 
120 	PROC_ACTION(set_dbregs(td, dbregs));
121 }
122 
123 /*
124  * The floating point registers need no privilege filtering or translation,
125  * so the machine-dependent code can simply copy them.
126  */
127 int
128 proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
129 {
130 
131 	PROC_ACTION(fill_fpregs(td, fpregs));
132 }
133 
134 int
135 proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
136 {
137 
138 	PROC_ACTION(set_fpregs(td, fpregs));
139 }
140 
141 int
142 proc_sstep(struct thread *td)
143 {
144 
145 	PROC_ACTION(ptrace_single_step(td));
146 }
147 
148 int
149 proc_rwmem(struct proc *p, struct uio *uio)
150 {
151 	struct vmspace *vm;
152 	vm_map_t map;
153 	vm_object_t backing_object, object = NULL;
154 	vm_offset_t pageno = 0;		/* page number */
155 	vm_prot_t reqprot;
156 	int error, writing;
157 
158 	mtx_lock(&Giant);
159 	/*
160 	 * If the vmspace is in the midst of being deallocated or the
161 	 * process is exiting, don't try to grab anything.  The page
162 	 * tables of that process may be in an inconsistent state.
163 	 */
164 	vm = p->p_vmspace;
165 	if ((p->p_flag & P_WEXIT)) {
166 		mtx_unlock(&Giant);
167 		return (EFAULT);
168 	}
169 	if (vm->vm_refcnt < 1) {
170 		mtx_unlock(&Giant);
171 		return (EFAULT);
172 	}
173 	++vm->vm_refcnt;
174 	/*
175 	 * The map we want...
176 	 */
177 	map = &vm->vm_map;
178 
179 	writing = uio->uio_rw == UIO_WRITE;
180 	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
181 	    VM_PROT_READ;
182 
183 	/*
184 	 * Only map in one page at a time.  We don't have to, but it
185 	 * makes things easier.  This way is trivial - right?
186 	 */
187 	do {
188 		vm_map_t tmap;
189 		vm_offset_t uva;
190 		int page_offset;		/* offset into page */
191 		vm_map_entry_t out_entry;
192 		vm_prot_t out_prot;
193 		boolean_t wired;
194 		vm_pindex_t pindex;
195 		u_int len;
196 		vm_page_t m;
197 
198 		object = NULL;
199 
200 		uva = (vm_offset_t)uio->uio_offset;
201 
202 		/*
203 		 * Get the page number of this segment.
204 		 */
205 		pageno = trunc_page(uva);
206 		page_offset = uva - pageno;
207 
208 		/*
209 		 * How many bytes to copy
210 		 */
211 		len = min(PAGE_SIZE - page_offset, uio->uio_resid);
212 
213 		/*
214 		 * Fault the page on behalf of the process
215 		 */
216 		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
217 		if (error) {
218 			error = EFAULT;
219 			break;
220 		}
221 
222 		/*
223 		 * Now we need to get the page.  out_entry, out_prot, and
224 		 * wired aren't used.  One would think the vm code would be
225 		 * a *bit* nicer...  We use tmap because vm_map_lookup() can
226 		 * change the map argument.
227 		 */
228 		tmap = map;
229 		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
230 		    &object, &pindex, &out_prot, &wired);
231 		if (error) {
232 			error = EFAULT;
233 			break;
234 		}
235 		VM_OBJECT_LOCK(object);
236 		while ((m = vm_page_lookup(object, pindex)) == NULL &&
237 		    !writing &&
238 		    (backing_object = object->backing_object) != NULL) {
239 			/*
240 			 * Allow fallback to backing objects if we are reading.
241 			 */
242 			VM_OBJECT_LOCK(backing_object);
243 			pindex += OFF_TO_IDX(object->backing_object_offset);
244 			VM_OBJECT_UNLOCK(object);
245 			object = backing_object;
246 		}
247 		VM_OBJECT_UNLOCK(object);
248 		if (m == NULL) {
249 			vm_map_lookup_done(tmap, out_entry);
250 			error = EFAULT;
251 			break;
252 		}
253 
254 		/*
255 		 * Hold the page in memory.
256 		 */
257 		vm_page_lock_queues();
258 		vm_page_hold(m);
259 		vm_page_unlock_queues();
260 
261 		/*
262 		 * We're done with tmap now.
263 		 */
264 		vm_map_lookup_done(tmap, out_entry);
265 
266 		/*
267 		 * Now do the i/o move.
268 		 */
269 		error = uiomove_fromphys(&m, page_offset, len, uio);
270 
271 		/*
272 		 * Release the page.
273 		 */
274 		vm_page_lock_queues();
275 		vm_page_unhold(m);
276 		vm_page_unlock_queues();
277 
278 	} while (error == 0 && uio->uio_resid > 0);
279 
280 	vmspace_free(vm);
281 	mtx_unlock(&Giant);
282 	return (error);
283 }
284 
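/*
 * Illustrative sketch (not compiled in; read_target_word() is a hypothetical
 * helper): reading one word from a traced process with proc_rwmem().
 * uio_offset names an address in the target's address space, while
 * uio_segflg says where iov_base points in the caller's context.  The
 * PT_READ_D/PT_WRITE_D handling in kern_ptrace() below follows the same
 * pattern.
 */
#if 0
static int
read_target_word(struct proc *p, vm_offset_t va, int *valp)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = (caddr_t)valp;		/* caller's kernel buffer */
	iov.iov_len = sizeof(int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)va;		/* address in the target */
	uio.uio_resid = sizeof(int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;
	return (proc_rwmem(p, &uio));
}
#endif
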
285 /*
286  * Process debugging system call.
287  */
288 #ifndef _SYS_SYSPROTO_H_
289 struct ptrace_args {
290 	int	req;
291 	pid_t	pid;
292 	caddr_t	addr;
293 	int	data;
294 };
295 #endif
296 
297 /*
298  * MPSAFE
299  */
300 int
301 ptrace(struct thread *td, struct ptrace_args *uap)
302 {
303 	/*
304 	 * XXX this obfuscation is to reduce stack usage, but the register
305 	 * structs may be too large to put on the stack anyway.
306 	 */
307 	union {
308 		struct ptrace_io_desc piod;
309 		struct ptrace_lwpinfo pl;
310 		struct dbreg dbreg;
311 		struct fpreg fpreg;
312 		struct reg reg;
313 	} r;
314 	void *addr;
315 	int error = 0;
316 
317 	addr = &r;
318 	switch (uap->req) {
319 	case PT_GETREGS:
320 	case PT_GETFPREGS:
321 	case PT_GETDBREGS:
322 	case PT_LWPINFO:
323 		break;
324 	case PT_SETREGS:
325 		error = copyin(uap->addr, &r.reg, sizeof r.reg);
326 		break;
327 	case PT_SETFPREGS:
328 		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
329 		break;
330 	case PT_SETDBREGS:
331 		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
332 		break;
333 	case PT_IO:
334 		error = copyin(uap->addr, &r.piod, sizeof r.piod);
335 		break;
336 	default:
337 		addr = uap->addr;
338 		break;
339 	}
340 	if (error)
341 		return (error);
342 
343 	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
344 	if (error)
345 		return (error);
346 
347 	switch (uap->req) {
348 	case PT_IO:
349 		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
350 		break;
351 	case PT_GETREGS:
352 		error = copyout(&r.reg, uap->addr, sizeof r.reg);
353 		break;
354 	case PT_GETFPREGS:
355 		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
356 		break;
357 	case PT_GETDBREGS:
358 		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
359 		break;
360 	case PT_LWPINFO:
361 		error = copyout(&r.pl, uap->addr, uap->data);
362 		break;
363 	}
364 
365 	return (error);
366 }
367 
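/*
 * Illustrative userland sketch (not part of the kernel): a minimal ptrace(2)
 * consumer exercising the PT_ATTACH, PT_GETREGS and PT_DETACH requests
 * dispatched above; error handling is omitted.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <machine/reg.h>

static void
dump_regs(pid_t pid)
{
	struct reg r;
	int status;

	ptrace(PT_ATTACH, pid, (caddr_t)0, 0);	/* target is sent SIGSTOP */
	waitpid(pid, &status, 0);		/* wait for it to stop */
	ptrace(PT_GETREGS, pid, (caddr_t)&r, 0);
	/* ... inspect r ... */
	ptrace(PT_DETACH, pid, (caddr_t)1, 0);	/* resume at the current PC */
}
#endif
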
368 int
369 kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
370 {
371 	struct iovec iov;
372 	struct uio uio;
373 	struct proc *curp, *p, *pp;
374 	struct thread *td2 = NULL;
375 	struct ptrace_io_desc *piod;
376 	struct ptrace_lwpinfo *pl;
377 	int error, write, tmp, num;
378 	int proctree_locked = 0;
379 	lwpid_t tid = 0, *buf;
380 	pid_t saved_pid = pid;
381 
382 	curp = td->td_proc;
383 
384 	/* Lock proctree before locking the process. */
385 	switch (req) {
386 	case PT_TRACE_ME:
387 	case PT_ATTACH:
388 	case PT_STEP:
389 	case PT_CONTINUE:
390 	case PT_TO_SCE:
391 	case PT_TO_SCX:
392 	case PT_DETACH:
393 		sx_xlock(&proctree_lock);
394 		proctree_locked = 1;
395 		break;
396 	default:
397 		break;
398 	}
399 
400 	write = 0;
401 	if (req == PT_TRACE_ME) {
402 		p = td->td_proc;
403 		PROC_LOCK(p);
404 	} else {
405 		if (pid <= PID_MAX) {
406 			if ((p = pfind(pid)) == NULL) {
407 				if (proctree_locked)
408 					sx_xunlock(&proctree_lock);
409 				return (ESRCH);
410 			}
411 		} else {
412 			/* this is slow, should be optimized */
413 			sx_slock(&allproc_lock);
414 			FOREACH_PROC_IN_SYSTEM(p) {
415 				PROC_LOCK(p);
416 				mtx_lock_spin(&sched_lock);
417 				FOREACH_THREAD_IN_PROC(p, td2) {
418 					if (td2->td_tid == pid)
419 						break;
420 				}
421 				mtx_unlock_spin(&sched_lock);
422 				if (td2 != NULL)
423 					break; /* proc lock held */
424 				PROC_UNLOCK(p);
425 			}
426 			sx_sunlock(&allproc_lock);
427 			if (p == NULL) {
428 				if (proctree_locked)
429 					sx_xunlock(&proctree_lock);
430 				return (ESRCH);
431 			}
432 			tid = pid;
433 			pid = p->p_pid;
434 		}
435 	}
436 	if ((error = p_cansee(td, p)) != 0)
437 		goto fail;
438 
439 	if ((error = p_candebug(td, p)) != 0)
440 		goto fail;
441 
442 	/*
443 	 * System processes can't be debugged.
444 	 */
445 	if ((p->p_flag & P_SYSTEM) != 0) {
446 		error = EINVAL;
447 		goto fail;
448 	}
449 
450 	if (tid == 0) {
451 		td2 = FIRST_THREAD_IN_PROC(p);
452 		tid = td2->td_tid;
453 	}
454 
455 	/*
456 	 * Permissions check
457 	 */
458 	switch (req) {
459 	case PT_TRACE_ME:
460 		/* Always legal. */
461 		break;
462 
463 	case PT_ATTACH:
464 		/* Self */
465 		if (p->p_pid == td->td_proc->p_pid) {
466 			error = EINVAL;
467 			goto fail;
468 		}
469 
470 		/* Already traced */
471 		if (p->p_flag & P_TRACED) {
472 			error = EBUSY;
473 			goto fail;
474 		}
475 
476 		/* Can't trace an ancestor if you're being traced. */
477 		if (curp->p_flag & P_TRACED) {
478 			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
479 				if (pp == p) {
480 					error = EINVAL;
481 					goto fail;
482 				}
483 			}
484 		}
485 
487 		/* OK */
488 		break;
489 
490 	case PT_CLEARSTEP:
491 		/* Allow thread to clear single step for itself */
492 		if (td->td_tid == tid)
493 			break;
494 
495 		/* FALLTHROUGH */
496 	default:
497 		/* not being traced... */
498 		if ((p->p_flag & P_TRACED) == 0) {
499 			error = EPERM;
500 			goto fail;
501 		}
502 
503 		/* not being traced by YOU */
504 		if (p->p_pptr != td->td_proc) {
505 			error = EBUSY;
506 			goto fail;
507 		}
508 
509 		/* not currently stopped */
510 		if (!P_SHOULDSTOP(p) || p->p_suspcount != p->p_numthreads ||
511 		    (p->p_flag & P_WAITED) == 0) {
512 			error = EBUSY;
513 			goto fail;
514 		}
515 
516 		/* OK */
517 		break;
518 	}
519 
520 #ifdef FIX_SSTEP
521 	/*
522 	 * Single step fixup, a la procfs
523 	 */
524 	FIX_SSTEP(td2);			/* XXXKSE */
525 #endif
526 
527 	/*
528 	 * Actually do the requests
529 	 */
530 
531 	td->td_retval[0] = 0;
532 
533 	switch (req) {
534 	case PT_TRACE_ME:
535 		/* set my trace flag and "owner" so it can read/write me */
536 		p->p_flag |= P_TRACED;
537 		p->p_oppid = p->p_pptr->p_pid;
538 		PROC_UNLOCK(p);
539 		sx_xunlock(&proctree_lock);
540 		return (0);
541 
542 	case PT_ATTACH:
543 		/* security check done above */
544 		p->p_flag |= P_TRACED;
545 		p->p_oppid = p->p_pptr->p_pid;
546 		if (p->p_pptr != td->td_proc)
547 			proc_reparent(p, td->td_proc);
548 		data = SIGSTOP;
549 		goto sendsig;	/* in PT_CONTINUE below */
550 
551 	case PT_CLEARSTEP:
552 		_PHOLD(p);
553 		error = ptrace_clear_single_step(td2);
554 		_PRELE(p);
555 		if (error)
556 			goto fail;
557 		PROC_UNLOCK(p);
558 		return (0);
559 
560 	case PT_SETSTEP:
561 		_PHOLD(p);
562 		error = ptrace_single_step(td2);
563 		_PRELE(p);
564 		if (error)
565 			goto fail;
566 		PROC_UNLOCK(p);
567 		return (0);
568 
569 	case PT_SUSPEND:
570 		_PHOLD(p);
571 		mtx_lock_spin(&sched_lock);
572 		td2->td_flags |= TDF_DBSUSPEND;
573 		mtx_unlock_spin(&sched_lock);
574 		_PRELE(p);
575 		PROC_UNLOCK(p);
576 		return (0);
577 
578 	case PT_RESUME:
579 		_PHOLD(p);
580 		mtx_lock_spin(&sched_lock);
581 		td2->td_flags &= ~TDF_DBSUSPEND;
582 		mtx_unlock_spin(&sched_lock);
583 		_PRELE(p);
584 		PROC_UNLOCK(p);
585 		return (0);
586 
587 	case PT_STEP:
588 	case PT_CONTINUE:
589 	case PT_TO_SCE:
590 	case PT_TO_SCX:
	case PT_SYSCALL:
591 	case PT_DETACH:
592 		/* Zero means do not send any signal */
593 		if (data < 0 || data > _SIG_MAXSIG) {
594 			error = EINVAL;
595 			goto fail;
596 		}
597 
598 		_PHOLD(p);
599 
600 		switch (req) {
601 		case PT_STEP:
602 			PROC_UNLOCK(p);
603 			error = ptrace_single_step(td2);
604 			if (error) {
605 				PRELE(p);
606 				goto fail_noproc;
607 			}
608 			PROC_LOCK(p);
609 			break;
610 		case PT_TO_SCE:
611 			p->p_stops |= S_PT_SCE;
612 			break;
613 		case PT_TO_SCX:
614 			p->p_stops |= S_PT_SCX;
615 			break;
616 		case PT_SYSCALL:
617 			p->p_stops |= S_PT_SCE | S_PT_SCX;
618 			break;
619 		}
620 
621 		if (addr != (void *)1) {
622 			PROC_UNLOCK(p);
623 			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
624 			if (error) {
625 				PRELE(p);
626 				goto fail_noproc;
627 			}
628 			PROC_LOCK(p);
629 		}
630 		_PRELE(p);
631 
632 		if (req == PT_DETACH) {
633 			/* reset process parent */
634 			if (p->p_oppid != p->p_pptr->p_pid) {
635 				struct proc *pp;
636 
637 				PROC_UNLOCK(p);
638 				pp = pfind(p->p_oppid);
639 				if (pp == NULL)
640 					pp = initproc;
641 				else
642 					PROC_UNLOCK(pp);
643 				PROC_LOCK(p);
644 				proc_reparent(p, pp);
645 				if (pp == initproc)
646 					p->p_sigparent = SIGCHLD;
647 			}
648 			p->p_flag &= ~(P_TRACED | P_WAITED);
649 			p->p_oppid = 0;
650 
651 			/* should we send SIGCHLD? */
652 		}
653 
654 	sendsig:
655 		if (proctree_locked)
656 			sx_xunlock(&proctree_lock);
657 		/* deliver or queue signal */
658 		if (P_SHOULDSTOP(p)) {
659 			p->p_xstat = data;
660 			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
661 			mtx_lock_spin(&sched_lock);
662 			if (saved_pid <= PID_MAX) {
663 				p->p_xthread->td_flags &= ~TDF_XSIG;
664 				p->p_xthread->td_xsig = data;
665 			} else {
666 				td2->td_flags &= ~TDF_XSIG;
667 				td2->td_xsig = data;
668 			}
669 			p->p_xthread = NULL;
670 			if (req == PT_DETACH) {
671 				struct thread *td3;
672 				FOREACH_THREAD_IN_PROC(p, td3)
673 					td3->td_flags &= ~TDF_DBSUSPEND;
674 			}
675 			/*
676 			 * Unsuspend all threads.  To leave a thread stopped
677 			 * across the continue, the debugger must suspend it
678 			 * with PT_SUSPEND beforehand.
679 			 */
680 			thread_unsuspend(p);
681 			thread_continued(p);
682 			mtx_unlock_spin(&sched_lock);
683 		} else if (data) {
684 			psignal(p, data);
685 		}
686 		PROC_UNLOCK(p);
687 
688 		return (0);
689 
690 	case PT_WRITE_I:
691 	case PT_WRITE_D:
692 		write = 1;
693 		/* FALLTHROUGH */
694 	case PT_READ_I:
695 	case PT_READ_D:
696 		PROC_UNLOCK(p);
697 		tmp = 0;
698 		/* write = 0 set above */
699 		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
700 		iov.iov_len = sizeof(int);
701 		uio.uio_iov = &iov;
702 		uio.uio_iovcnt = 1;
703 		uio.uio_offset = (off_t)(uintptr_t)addr;
704 		uio.uio_resid = sizeof(int);
705 		uio.uio_segflg = UIO_SYSSPACE;	/* data and tmp are kernel buffers */
706 		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
707 		uio.uio_td = td;
708 		error = proc_rwmem(p, &uio);
709 		if (uio.uio_resid != 0) {
710 			/*
711 			 * XXX proc_rwmem() doesn't currently return ENOSPC,
712 			 * so I think write() can bogusly return 0.
713 			 * XXX what happens for short writes?  We don't want
714 			 * to write partial data.
715 			 * XXX proc_rwmem() returns EPERM for other invalid
716 			 * addresses.  Convert this to EINVAL.  Does this
717 			 * clobber returns of EPERM for other reasons?
718 			 */
719 			if (error == 0 || error == ENOSPC || error == EPERM)
720 				error = EINVAL;	/* EOF */
721 		}
722 		if (!write)
723 			td->td_retval[0] = tmp;
724 		return (error);
725 
726 	case PT_IO:
727 		PROC_UNLOCK(p);
728 		piod = addr;
729 		iov.iov_base = piod->piod_addr;
730 		iov.iov_len = piod->piod_len;
731 		uio.uio_iov = &iov;
732 		uio.uio_iovcnt = 1;
733 		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
734 		uio.uio_resid = piod->piod_len;
735 		uio.uio_segflg = UIO_USERSPACE;
736 		uio.uio_td = td;
737 		switch (piod->piod_op) {
738 		case PIOD_READ_D:
739 		case PIOD_READ_I:
740 			uio.uio_rw = UIO_READ;
741 			break;
742 		case PIOD_WRITE_D:
743 		case PIOD_WRITE_I:
744 			uio.uio_rw = UIO_WRITE;
745 			break;
746 		default:
747 			return (EINVAL);
748 		}
749 		error = proc_rwmem(p, &uio);
750 		piod->piod_len -= uio.uio_resid;
751 		return (error);
752 
753 	case PT_KILL:
754 		data = SIGKILL;
755 		goto sendsig;	/* in PT_CONTINUE above */
756 
757 	case PT_SETREGS:
758 		_PHOLD(p);
759 		error = proc_write_regs(td2, addr);
760 		_PRELE(p);
761 		PROC_UNLOCK(p);
762 		return (error);
763 
764 	case PT_GETREGS:
765 		_PHOLD(p);
766 		error = proc_read_regs(td2, addr);
767 		_PRELE(p);
768 		PROC_UNLOCK(p);
769 		return (error);
770 
771 	case PT_SETFPREGS:
772 		_PHOLD(p);
773 		error = proc_write_fpregs(td2, addr);
774 		_PRELE(p);
775 		PROC_UNLOCK(p);
776 		return (error);
777 
778 	case PT_GETFPREGS:
779 		_PHOLD(p);
780 		error = proc_read_fpregs(td2, addr);
781 		_PRELE(p);
782 		PROC_UNLOCK(p);
783 		return (error);
784 
785 	case PT_SETDBREGS:
786 		_PHOLD(p);
787 		error = proc_write_dbregs(td2, addr);
788 		_PRELE(p);
789 		PROC_UNLOCK(p);
790 		return (error);
791 
792 	case PT_GETDBREGS:
793 		_PHOLD(p);
794 		error = proc_read_dbregs(td2, addr);
795 		_PRELE(p);
796 		PROC_UNLOCK(p);
797 		return (error);
798 
799 	case PT_LWPINFO:
800 		if (data == 0 || data > sizeof(*pl)) {
801 			error = EINVAL;
			goto fail;
		}
802 		pl = addr;
803 		_PHOLD(p);
804 		if (saved_pid <= PID_MAX) {
805 			pl->pl_lwpid = p->p_xthread->td_tid;
806 			pl->pl_event = PL_EVENT_SIGNAL;
807 		} else {
808 			pl->pl_lwpid = td2->td_tid;
809 			if (td2->td_flags & TDF_XSIG)
810 				pl->pl_event = PL_EVENT_SIGNAL;
811 			else
812 				pl->pl_event = 0;
813 		}
814 		_PRELE(p);
815 		PROC_UNLOCK(p);
816 		return (0);
817 
818 	case PT_GETNUMLWPS:
819 		td->td_retval[0] = p->p_numthreads;
820 		PROC_UNLOCK(p);
821 		return (0);
822 
823 	case PT_GETLWPLIST:
824 		if (data <= 0) {
825 			PROC_UNLOCK(p);
826 			return (EINVAL);
827 		}
828 		num = imin(p->p_numthreads, data);
829 		PROC_UNLOCK(p);
830 		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
831 		tmp = 0;
832 		PROC_LOCK(p);
833 		mtx_lock_spin(&sched_lock);
834 		FOREACH_THREAD_IN_PROC(p, td2) {
835 			if (tmp >= num)
836 				break;
837 			buf[tmp++] = td2->td_tid;
838 		}
839 		mtx_unlock_spin(&sched_lock);
840 		PROC_UNLOCK(p);
841 		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
842 		free(buf, M_TEMP);
843 		if (!error)
844 			td->td_retval[0] = tmp;
845  		return (error);
846 
847 	default:
848 #ifdef __HAVE_PTRACE_MACHDEP
849 		if (req >= PT_FIRSTMACH) {
850 			_PHOLD(p);
851 			PROC_UNLOCK(p);
852 			error = cpu_ptrace(td2, req, addr, data);
853 			PRELE(p);
854 			return (error);
855 		}
856 #endif
857 		break;
858 	}
859 
860 	/* Unknown request. */
861 	error = EINVAL;
862 
863 fail:
864 	PROC_UNLOCK(p);
865 fail_noproc:
866 	if (proctree_locked)
867 		sx_xunlock(&proctree_lock);
868 	return (error);
869 }
870 
871 /*
872  * Stop a process because of a debugging event;
873  * stay stopped until p->p_step is cleared
874  * (cleared by PIOCCONT in procfs).
875  */
876 void
877 stopevent(struct proc *p, unsigned int event, unsigned int val)
878 {
879 
880 	PROC_LOCK_ASSERT(p, MA_OWNED);
881 	p->p_step = 1;
882 	do {
883 		p->p_xstat = val;
884 		p->p_xthread = NULL;
885 		p->p_stype = event;	/* Which event caused the stop? */
886 		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
887 		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
888 	} while (p->p_step);
889 }
890
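/*
 * For reference (an assumption about the procfs side, not part of this
 * file): the PIOCCONT handler releases a process parked in stopevent()
 * by clearing p_step and waking the sleep channel, roughly
 *
 *	PROC_LOCK(p);
 *	p->p_step = 0;
 *	wakeup(&p->p_step);
 *	PROC_UNLOCK(p);
 *
 * while PIOCWAIT sleeps on &p->p_stype until stopevent() reports an event.
 */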