xref: /freebsd/sys/kern/sys_process.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */
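
/*
 * Illustrative sketch only (not part of the original source): a kernel
 * consumer of the accessors documented above.  The target thread's process
 * must already be stopped and resident; "example_fetch_regs" is a
 * hypothetical name.  Disabled with #if 0 so it is never built.
 */
#if 0
static int
example_fetch_regs(struct thread *td, struct reg *regs)
{
	int error;

	PROC_LOCK(td->td_proc);		/* PROC_ACTION() asserts this lock. */
	error = proc_read_regs(td, regs);
	if (error == 0)
		error = proc_sstep(td);	/* trap after the next instruction */
	PROC_UNLOCK(td->td_proc);
	return (error);
}
#endif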

#define	PROC_ACTION(action) do {					\
	int error;							\
									\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);			\
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)			\
		error = EIO;						\
	else								\
		error = (action);					\
	return (error);							\
} while(0)
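
/*
 * For reference (added comment, not in the original source):
 * PROC_ACTION(fill_regs(td, regs)) in proc_read_regs() below expands,
 * roughly, to
 *
 *	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 *	if ((td->td_proc->p_sflag & PS_INMEM) == 0)
 *		return (EIO);
 *	return (fill_regs(td, regs));
 *
 * i.e. the macro supplies the whole body of each accessor, including its
 * return statement.
 */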

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't need to do anything special with the FP registers:
 * there are no security holes or translations for fpregs, so we can
 * just copy them straight through.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	int error, refcnt, writing;

	/*
	 * If the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page
	 * tables of that process may be in an inconsistent state.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT))
		return (EFAULT);
	do {
		if ((refcnt = vm->vm_refcnt) < 1)
			return (EFAULT);
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));

	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, and
		 * wired aren't used.  One would think the vm code would be
		 * a *bit* nicer...  We use tmap because vm_map_lookup()
		 * can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	vmspace_free(vm);
	return (error);
}
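
/*
 * Illustrative sketch only (not part of the original source): how a kernel
 * caller sets up the uio that proc_rwmem() consumes.  This mirrors the
 * PT_READ_D handling in kern_ptrace() below; "example_read_word" is a
 * hypothetical name.  Disabled with #if 0 so it is never built.
 */
#if 0
static int
example_read_word(struct thread *td, struct proc *p, vm_offset_t va, int *valp)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = (caddr_t)valp;	/* destination buffer (kernel) */
	iov.iov_len = sizeof(int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)va;	/* address in the traced process */
	uio.uio_resid = sizeof(int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;
	return (proc_rwmem(p, &uio));
}
#endif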

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

/*
 * MPSAFE
 */
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
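
/*
 * For reference (added comment, not in the original source): the wrapper
 * above copies register buffers in and out on behalf of a debugger doing
 * something like
 *
 *	struct reg r;
 *	ptrace(PT_GETREGS, pid, (caddr_t)&r, 0);
 *
 * so that kern_ptrace() itself only ever deals with kernel addresses.
 */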

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
	pid_t saved_pid = pid;

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				mtx_lock_spin(&sched_lock);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				mtx_unlock_spin(&sched_lock);
				if (td2 != NULL)
					break; /* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		td2 = FIRST_THREAD_IN_PROC(p);
		tid = td2->td_tid;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);			/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		_PHOLD(p);
		error = ptrace_clear_single_step(td2);
		_PRELE(p);
		if (error)
			goto fail;
		PROC_UNLOCK(p);
		return (0);

	case PT_SETSTEP:
		_PHOLD(p);
		error = ptrace_single_step(td2);
		_PRELE(p);
		if (error)
			goto fail;
		PROC_UNLOCK(p);
		return (0);

	case PT_SUSPEND:
		_PHOLD(p);
		mtx_lock_spin(&sched_lock);
		td2->td_flags |= TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_RESUME:
		_PHOLD(p);
		mtx_lock_spin(&sched_lock);
		td2->td_flags &= ~TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		_PHOLD(p);

		switch (req) {
		case PT_STEP:
			PROC_UNLOCK(p);
			error = ptrace_single_step(td2);
			if (error) {
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		if (addr != (void *)1) {
			PROC_UNLOCK(p);
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error) {
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
		}
		_PRELE(p);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
		if (P_SHOULDSTOP(p)) {
			p->p_xstat = data;
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
			mtx_lock_spin(&sched_lock);
			if (saved_pid <= PID_MAX) {
				p->p_xthread->td_flags &= ~TDF_XSIG;
				p->p_xthread->td_xsig = data;
			} else {
				td2->td_flags &= ~TDF_XSIG;
				td2->td_xsig = data;
			}
			p->p_xthread = NULL;
			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_flags &= ~TDF_DBSUSPEND;
			}
			/*
			 * Unsuspend all threads.  To keep a thread from
			 * running, use PT_SUSPEND to suspend it before
			 * continuing the process.
			 */
			thread_unsuspend(p);
			thread_continued(p);
			mtx_unlock_spin(&sched_lock);
		} else if (data) {
			psignal(p, data);
		}
		PROC_UNLOCK(p);

		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		return (error);

	case PT_IO:
		PROC_UNLOCK(p);
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		error = proc_rwmem(p, &uio);
		piod->piod_len -= uio.uio_resid;
		return (error);
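
		/*
		 * For reference (added comment, not in the original source):
		 * the userland side of PT_IO fills in a descriptor like
		 *
		 *	struct ptrace_io_desc piod;
		 *	piod.piod_op = PIOD_READ_D;
		 *	piod.piod_offs = (void *)target_va;   (address in the tracee)
		 *	piod.piod_addr = local_buf;           (buffer in the debugger)
		 *	piod.piod_len = nbytes;
		 *	ptrace(PT_IO, pid, (caddr_t)&piod, 0);
		 *
		 * and piod_len is updated above to the number of bytes
		 * actually transferred.
		 */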

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = proc_write_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = proc_read_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = proc_write_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = proc_read_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = proc_write_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = proc_read_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_LWPINFO:
		if (data == 0 || data > sizeof(*pl))
			return (EINVAL);
		pl = addr;
		_PHOLD(p);
		if (saved_pid <= PID_MAX) {
			pl->pl_lwpid = p->p_xthread->td_tid;
			pl->pl_event = PL_EVENT_SIGNAL;
		} else {
			pl->pl_lwpid = td2->td_tid;
			if (td2->td_flags & TDF_XSIG)
				pl->pl_event = PL_EVENT_SIGNAL;
			else
				pl->pl_event = 0;
		}
		if (td2->td_pflags & TDP_SA) {
			pl->pl_flags = PL_FLAG_SA;
			if (td2->td_upcall && !TD_CAN_UNBIND(td2))
				pl->pl_flags |= PL_FLAG_BOUND;
		} else {
			pl->pl_flags = 0;
		}
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		PROC_UNLOCK(p);
		return (0);

	case PT_GETLWPLIST:
		if (data <= 0) {
			PROC_UNLOCK(p);
			return (EINVAL);
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = num;
		return (error);
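
		/*
		 * For reference (added comment, not in the original source):
		 * a debugger typically pairs the two requests above as
		 *
		 *	n = ptrace(PT_GETNUMLWPS, pid, NULL, 0);
		 *	lwps = calloc(n, sizeof(lwpid_t));
		 *	ptrace(PT_GETLWPLIST, pid, (caddr_t)lwps, n);
		 *
		 * where the return value of PT_GETLWPLIST is the number of
		 * LWP ids retrieved.
		 */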

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			_PHOLD(p);
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PRELE(p);
			return (error);
		}
#endif
		break;
	}

	/* Unknown request. */
	error = EINVAL;

fail:
	PROC_UNLOCK(p);
fail_noproc:
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
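
/*
 * For reference (added comment, not in the original source): the request
 * handling above is normally driven by a debugger sequence along the
 * lines of
 *
 *	ptrace(PT_ATTACH, pid, 0, 0);		   target stops with SIGSTOP
 *	waitpid(pid, &status, 0);
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);   addr 1 = resume at same PC
 *	...
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);	   reparent, clear P_TRACED
 */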

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
893