/* xref: /freebsd/sys/kern/sys_process.c (revision ceaec73d406831b1251babb61675df0a1aa54a31) */
/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {					\
	int error;							\
									\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);			\
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)			\
		error = EIO;						\
	else								\
		error = (action);					\
	return (error);							\
} while (0)
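
/*
 * Illustrative sketch (comment only, not compiled): with the process locked
 * and resident, the helpers below reduce via PROC_ACTION() to the caller-side
 * pattern used by kern_ptrace() later in this file.  "p", "td2" and "regs"
 * are hypothetical locals of the caller.
 *
 *	struct reg regs;
 *	int error;
 *
 *	PROC_LOCK(p);
 *	_PHOLD(p);
 *	error = proc_read_regs(td2, &regs);	(EIO if the proc is swapped out)
 *	_PRELE(p);
 *	PROC_UNLOCK(p);
 */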

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	int error, refcnt, writing;

	/*
	 * If the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything; the page
	 * tables of such a process may be in an inconsistent state.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT))
		return (EFAULT);
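	/*
	 * Take a reference on the vmspace only if it is still live:
	 * re-read vm_refcnt and retry the compare-and-set until it
	 * atomically bumps the count, bailing out if the count has
	 * already dropped to zero.  The reference is released with
	 * vmspace_free() before returning.
	 */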
	do {
		if ((refcnt = vm->vm_refcnt) < 1)
			return (EFAULT);
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));

	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);
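
		/*
		 * Illustrative numbers only: with a 4K page size,
		 * uva = 0x20234 and uio_resid = 8192 give pageno = 0x20000,
		 * page_offset = 0x234 and len = 4096 - 0x234 = 3532, so the
		 * first pass copies up to the page boundary and the next
		 * pass starts page-aligned.
		 */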

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_prot and wired aren't
		 * used; out_entry is needed only for the vm_map_lookup_done()
		 * calls below.  We use tmap because vm_map_lookup() can
		 * change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the I/O move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	vmspace_free(vm);
	return (error);
}

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif
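
/*
 * Userland view (an illustrative sketch, not part of the kernel): a debugger
 * typically drives this syscall as below.  "pid" is a hypothetical variable
 * and error handling is omitted.
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <machine/reg.h>
 *
 *	struct reg regs;
 *
 *	ptrace(PT_ATTACH, pid, (caddr_t)0, 0);
 *	waitpid(pid, NULL, 0);				wait for the stop
 *	ptrace(PT_GETREGS, pid, (caddr_t)&regs, 0);
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);		resume where stopped
 */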

/*
 * MPSAFE
 */
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
	pid_t saved_pid = pid;

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				mtx_lock_spin(&sched_lock);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				mtx_unlock_spin(&sched_lock);
				if (td2 != NULL)
					break; /* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		td2 = FIRST_THREAD_IN_PROC(p);
		tid = td2->td_tid;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;
	}

#ifdef FIX_SSTEP
	/*
	 * Single-step fixup, a la procfs.
	 */
	FIX_SSTEP(td2);			/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		_PHOLD(p);
		error = ptrace_clear_single_step(td2);
		_PRELE(p);
		if (error)
			goto fail;
		PROC_UNLOCK(p);
		return (0);

	case PT_SETSTEP:
		_PHOLD(p);
		error = ptrace_single_step(td2);
		_PRELE(p);
		if (error)
			goto fail;
		PROC_UNLOCK(p);
		return (0);

	case PT_SUSPEND:
		_PHOLD(p);
		mtx_lock_spin(&sched_lock);
		td2->td_flags |= TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_RESUME:
		_PHOLD(p);
		mtx_lock_spin(&sched_lock);
		td2->td_flags &= ~TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		_PHOLD(p);

		switch (req) {
		case PT_STEP:
			PROC_UNLOCK(p);
			error = ptrace_single_step(td2);
			if (error) {
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		if (addr != (void *)1) {
			PROC_UNLOCK(p);
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error) {
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
		}
		_PRELE(p);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
		if (P_SHOULDSTOP(p)) {
			p->p_xstat = data;
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
			mtx_lock_spin(&sched_lock);
			if (saved_pid <= PID_MAX) {
				p->p_xthread->td_flags &= ~TDF_XSIG;
				p->p_xthread->td_xsig = data;
			} else {
				td2->td_flags &= ~TDF_XSIG;
				td2->td_xsig = data;
			}
			p->p_xthread = NULL;
			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_flags &= ~TDF_DBSUSPEND;
			}
			/*
			 * Unsuspend all threads.  To keep a thread from
			 * running, suspend it with PT_SUSPEND before
			 * continuing the process.
			 */
			thread_unsuspend(p);
			thread_continued(p);
			mtx_unlock_spin(&sched_lock);
		} else if (data) {
			psignal(p, data);
		}
		PROC_UNLOCK(p);

		return (0);

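	/*
	 * The next four requests implement the classic one-word peek/poke
	 * interface on top of proc_rwmem().  Userland sketch (illustrative
	 * only; "pid" and "where" are hypothetical):
	 *
	 *	int val = ptrace(PT_READ_D, pid, (caddr_t)where, 0);
	 *	ptrace(PT_WRITE_D, pid, (caddr_t)where, newval);
	 *
	 * The word read comes back as the syscall's return value; the word
	 * to write is passed in "data".
	 */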
	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		return (error);

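	/*
	 * PT_IO does bulk transfers described by a struct ptrace_io_desc.
	 * Userland sketch (illustrative only; "pid", "buf" and "remote" are
	 * hypothetical):
	 *
	 *	struct ptrace_io_desc piod;
	 *
	 *	piod.piod_op = PIOD_READ_D;
	 *	piod.piod_offs = remote;	address in the traced process
	 *	piod.piod_addr = buf;		buffer in the debugger
	 *	piod.piod_len = sizeof(buf);
	 *	ptrace(PT_IO, pid, (caddr_t)&piod, 0);
	 *
	 * On return piod_len holds the number of bytes actually transferred.
	 */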
	case PT_IO:
		PROC_UNLOCK(p);
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		error = proc_rwmem(p, &uio);
		piod->piod_len -= uio.uio_resid;
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = proc_write_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = proc_read_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = proc_write_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = proc_read_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = proc_write_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = proc_read_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

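	/*
	 * PT_LWPINFO reports per-LWP state.  Userland sketch (illustrative
	 * only; "pid" is hypothetical):
	 *
	 *	struct ptrace_lwpinfo pl;
	 *
	 *	ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl));
	 *
	 * "data" carries the size of the caller's structure so the copyout
	 * in ptrace() above never writes past it.
	 */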
	case PT_LWPINFO:
		if (data == 0 || data > sizeof(*pl))
			return (EINVAL);
		pl = addr;
		_PHOLD(p);
		if (saved_pid <= PID_MAX) {
			pl->pl_lwpid = p->p_xthread->td_tid;
			pl->pl_event = PL_EVENT_SIGNAL;
		} else {
			pl->pl_lwpid = td2->td_tid;
			if (td2->td_flags & TDF_XSIG)
				pl->pl_event = PL_EVENT_SIGNAL;
			else
				pl->pl_event = 0;
		}
		if (td2->td_pflags & TDP_SA) {
			pl->pl_flags = PL_FLAG_SA;
			if (td2->td_upcall && !TD_CAN_UNBIND(td2))
				pl->pl_flags |= PL_FLAG_BOUND;
		} else {
			pl->pl_flags = 0;
		}
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

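	/*
	 * Thread enumeration.  Userland sketch (illustrative only; "pid"
	 * and "tids" are hypothetical):
	 *
	 *	int n = ptrace(PT_GETNUMLWPS, pid, NULL, 0);
	 *	lwpid_t *tids = calloc(n, sizeof(lwpid_t));
	 *	n = ptrace(PT_GETLWPLIST, pid, (caddr_t)tids, n);
	 *
	 * PT_GETLWPLIST returns the number of LWP ids reported.
	 */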
	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		PROC_UNLOCK(p);
		return (0);

	case PT_GETLWPLIST:
		if (data <= 0) {
			PROC_UNLOCK(p);
			return (EINVAL);
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = num;
		return (error);

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			_PHOLD(p);
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PRELE(p);
			return (error);
		}
#endif
		break;
	}

	/* Unknown request. */
	error = EINVAL;

fail:
	PROC_UNLOCK(p);
fail_noproc:
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
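
/*
 * The continuing side (PIOCCONT in procfs) is expected to clear p_step and
 * wake the sleeper, roughly (illustrative sketch, proc lock held):
 *
 *	p->p_step = 0;
 *	wakeup(&p->p_step);
 *
 * which terminates the msleep() loop above.
 */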