xref: /freebsd/sys/kern/sys_process.c (revision 5ae59dec60e3815b621ae87f74a377cf3449ca55)
1 /*-
2  * Copyright (c) 1994, Sean Eric Fagan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Sean Eric Fagan.
16  * 4. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_compat.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/syscallsubr.h>
42 #include <sys/sysent.h>
43 #include <sys/sysproto.h>
44 #include <sys/priv.h>
45 #include <sys/proc.h>
46 #include <sys/procctl.h>
47 #include <sys/vnode.h>
48 #include <sys/ptrace.h>
49 #include <sys/rwlock.h>
50 #include <sys/sx.h>
51 #include <sys/malloc.h>
52 #include <sys/signalvar.h>
53 
54 #include <machine/reg.h>
55 
56 #include <security/audit/audit.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 #include <vm/vm_extern.h>
61 #include <vm/vm_map.h>
62 #include <vm/vm_kern.h>
63 #include <vm/vm_object.h>
64 #include <vm/vm_page.h>
65 #include <vm/vm_param.h>
66 
67 #ifdef COMPAT_FREEBSD32
68 #include <sys/procfs.h>
69 #include <compat/freebsd32/freebsd32_signal.h>
70 
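/*
 * 32-bit layouts of the ptrace user structures, used when a 64-bit
 * kernel services a 32-bit (COMPAT_FREEBSD32) tracer.
 */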
71 struct ptrace_io_desc32 {
72 	int		piod_op;
73 	uint32_t	piod_offs;
74 	uint32_t	piod_addr;
75 	uint32_t	piod_len;
76 };
77 
78 struct ptrace_vm_entry32 {
79 	int		pve_entry;
80 	int		pve_timestamp;
81 	uint32_t	pve_start;
82 	uint32_t	pve_end;
83 	uint32_t	pve_offset;
84 	u_int		pve_prot;
85 	u_int		pve_pathlen;
86 	int32_t		pve_fileid;
87 	u_int		pve_fsid;
88 	uint32_t	pve_path;
89 };
90 
91 struct ptrace_lwpinfo32 {
92 	lwpid_t	pl_lwpid;	/* LWP described. */
93 	int	pl_event;	/* Event that stopped the LWP. */
94 	int	pl_flags;	/* LWP flags. */
95 	sigset_t	pl_sigmask;	/* LWP signal mask */
96 	sigset_t	pl_siglist;	/* LWP pending signal */
97 	struct siginfo32 pl_siginfo;	/* siginfo for signal */
98 	char	pl_tdname[MAXCOMLEN + 1];	/* LWP name. */
99 	int	pl_child_pid;		/* New child pid */
100 };
101 
102 #endif
103 
104 /*
105  * Functions implemented using PROC_ACTION():
106  *
107  * proc_read_regs(proc, regs)
108  *	Get the current user-visible register set from the process
109  *	and copy it into the regs structure (<machine/reg.h>).
110  *	The process is stopped at the time read_regs is called.
111  *
112  * proc_write_regs(proc, regs)
113  *	Update the current register set from the passed in regs
114  *	structure.  Take care to avoid clobbering special CPU
115  *	registers or privileged bits in the PSL.
116  *	Depending on the architecture this may have fix-up work to do,
117  *	especially if the IAR or PCW are modified.
118  *	The process is stopped at the time write_regs is called.
119  *
120  * proc_read_fpregs, proc_write_fpregs
121  *	deal with the floating point register set, otherwise as above.
122  *
123  * proc_read_dbregs, proc_write_dbregs
124  *	deal with the processor debug register set, otherwise as above.
125  *
126  * proc_sstep(proc)
127  *	Arrange for the process to trap after executing a single instruction.
128  */
129 
130 #define	PROC_ACTION(action) do {					\
131 	int error;							\
132 									\
133 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);			\
134 	if ((td->td_proc->p_flag & P_INMEM) == 0)			\
135 		error = EIO;						\
136 	else								\
137 		error = (action);					\
138 	return (error);							\
139 } while (0)
140 
141 int
142 proc_read_regs(struct thread *td, struct reg *regs)
143 {
144 
145 	PROC_ACTION(fill_regs(td, regs));
146 }
147 
148 int
149 proc_write_regs(struct thread *td, struct reg *regs)
150 {
151 
152 	PROC_ACTION(set_regs(td, regs));
153 }
154 
155 int
156 proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
157 {
158 
159 	PROC_ACTION(fill_dbregs(td, dbregs));
160 }
161 
162 int
163 proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
164 {
165 
166 	PROC_ACTION(set_dbregs(td, dbregs));
167 }
168 
169 /*
170  * Ptrace doesn't support fpregs at all, and there are no security holes
171  * or translations for fpregs, so we can just copy them.
172  */
173 int
174 proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
175 {
176 
177 	PROC_ACTION(fill_fpregs(td, fpregs));
178 }
179 
180 int
181 proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
182 {
183 
184 	PROC_ACTION(set_fpregs(td, fpregs));
185 }
186 
187 #ifdef COMPAT_FREEBSD32
188 /* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
189 int
190 proc_read_regs32(struct thread *td, struct reg32 *regs32)
191 {
192 
193 	PROC_ACTION(fill_regs32(td, regs32));
194 }
195 
196 int
197 proc_write_regs32(struct thread *td, struct reg32 *regs32)
198 {
199 
200 	PROC_ACTION(set_regs32(td, regs32));
201 }
202 
203 int
204 proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
205 {
206 
207 	PROC_ACTION(fill_dbregs32(td, dbregs32));
208 }
209 
210 int
211 proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
212 {
213 
214 	PROC_ACTION(set_dbregs32(td, dbregs32));
215 }
216 
217 int
218 proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
219 {
220 
221 	PROC_ACTION(fill_fpregs32(td, fpregs32));
222 }
223 
224 int
225 proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
226 {
227 
228 	PROC_ACTION(set_fpregs32(td, fpregs32));
229 }
230 #endif
231 
232 int
233 proc_sstep(struct thread *td)
234 {
235 
236 	PROC_ACTION(ptrace_single_step(td));
237 }
238 
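/*
 * Perform the I/O described by uio on the address space of process p,
 * one page at a time, faulting each page in (and, for writes, dirtying
 * the private copy) before moving the data.
 */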
239 int
240 proc_rwmem(struct proc *p, struct uio *uio)
241 {
242 	vm_map_t map;
243 	vm_offset_t pageno;		/* page number */
244 	vm_prot_t reqprot;
245 	int error, fault_flags, page_offset, writing;
246 
247 	/*
248 	 * Assert that someone has locked this vmspace.  (Should be
249 	 * curthread but we can't assert that.)  This keeps the process
250 	 * from exiting out from under us until this operation completes.
251 	 */
252 	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
253 	    p, p->p_pid));
254 
255 	/*
256 	 * The map we want...
257 	 */
258 	map = &p->p_vmspace->vm_map;
259 
260 	/*
261 	 * If we are writing, then we request vm_fault() to create a private
262 	 * copy of each page.  Since these copies will not be writeable by the
263 	 * process, we must explicitly request that they be dirtied.
264 	 */
265 	writing = uio->uio_rw == UIO_WRITE;
266 	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
267 	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;
268 
269 	/*
270 	 * Only map in one page at a time.  We don't have to, but it
271 	 * makes things easier.  This way is trivial - right?
272 	 */
273 	do {
274 		vm_offset_t uva;
275 		u_int len;
276 		vm_page_t m;
277 
278 		uva = (vm_offset_t)uio->uio_offset;
279 
280 		/*
281 		 * Get the page number of this segment.
282 		 */
283 		pageno = trunc_page(uva);
284 		page_offset = uva - pageno;
285 
286 		/*
287 		 * How many bytes to copy
288 		 */
289 		len = min(PAGE_SIZE - page_offset, uio->uio_resid);
290 
291 		/*
292 		 * Fault and hold the page on behalf of the process.
293 		 */
294 		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
295 		if (error != KERN_SUCCESS) {
296 			if (error == KERN_RESOURCE_SHORTAGE)
297 				error = ENOMEM;
298 			else
299 				error = EFAULT;
300 			break;
301 		}
302 
303 		/*
304 		 * Now do the i/o move.
305 		 */
306 		error = uiomove_fromphys(&m, page_offset, len, uio);
307 
308 		/* Make the I-cache coherent for breakpoints. */
309 		if (writing && error == 0) {
310 			vm_map_lock_read(map);
311 			if (vm_map_check_protection(map, pageno, pageno +
312 			    PAGE_SIZE, VM_PROT_EXECUTE))
313 				vm_sync_icache(map, uva, len);
314 			vm_map_unlock_read(map);
315 		}
316 
317 		/*
318 		 * Release the page.
319 		 */
320 		vm_page_lock(m);
321 		vm_page_unhold(m);
322 		vm_page_unlock(m);
323 
324 	} while (error == 0 && uio->uio_resid > 0);
325 
326 	return (error);
327 }
328 
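/*
 * Look up the VM map entry at index pve_entry in the traced process's
 * map and fill in *pve (PT_VM_ENTRY), optionally copying the path of
 * the backing vnode out to pve_path when pve_pathlen is non-zero.
 */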
329 static int
330 ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
331 {
332 	struct vattr vattr;
333 	vm_map_t map;
334 	vm_map_entry_t entry;
335 	vm_object_t obj, tobj, lobj;
336 	struct vmspace *vm;
337 	struct vnode *vp;
338 	char *freepath, *fullpath;
339 	u_int pathlen;
340 	int error, index;
341 
342 	error = 0;
343 	obj = NULL;
344 
345 	vm = vmspace_acquire_ref(p);
346 	map = &vm->vm_map;
347 	vm_map_lock_read(map);
348 
349 	do {
350 		entry = map->header.next;
351 		index = 0;
352 		while (index < pve->pve_entry && entry != &map->header) {
353 			entry = entry->next;
354 			index++;
355 		}
356 		if (index != pve->pve_entry) {
357 			error = EINVAL;
358 			break;
359 		}
360 		while (entry != &map->header &&
361 		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
362 			entry = entry->next;
363 			index++;
364 		}
365 		if (entry == &map->header) {
366 			error = ENOENT;
367 			break;
368 		}
369 
370 		/* We got an entry. */
371 		pve->pve_entry = index + 1;
372 		pve->pve_timestamp = map->timestamp;
373 		pve->pve_start = entry->start;
374 		pve->pve_end = entry->end - 1;
375 		pve->pve_offset = entry->offset;
376 		pve->pve_prot = entry->protection;
377 
378 		/* Backing object's path needed? */
379 		if (pve->pve_pathlen == 0)
380 			break;
381 
382 		pathlen = pve->pve_pathlen;
383 		pve->pve_pathlen = 0;
384 
385 		obj = entry->object.vm_object;
386 		if (obj != NULL)
387 			VM_OBJECT_RLOCK(obj);
388 	} while (0);
389 
390 	vm_map_unlock_read(map);
391 	vmspace_free(vm);
392 
393 	pve->pve_fsid = VNOVAL;
394 	pve->pve_fileid = VNOVAL;
395 
396 	if (error == 0 && obj != NULL) {
397 		lobj = obj;
398 		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
399 			if (tobj != obj)
400 				VM_OBJECT_RLOCK(tobj);
401 			if (lobj != obj)
402 				VM_OBJECT_RUNLOCK(lobj);
403 			lobj = tobj;
404 			pve->pve_offset += tobj->backing_object_offset;
405 		}
406 		vp = (lobj->type == OBJT_VNODE) ? lobj->handle : NULL;
407 		if (vp != NULL)
408 			vref(vp);
409 		if (lobj != obj)
410 			VM_OBJECT_RUNLOCK(lobj);
411 		VM_OBJECT_RUNLOCK(obj);
412 
413 		if (vp != NULL) {
414 			freepath = NULL;
415 			fullpath = NULL;
416 			vn_fullpath(td, vp, &fullpath, &freepath);
417 			vn_lock(vp, LK_SHARED | LK_RETRY);
418 			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
419 				pve->pve_fileid = vattr.va_fileid;
420 				pve->pve_fsid = vattr.va_fsid;
421 			}
422 			vput(vp);
423 
424 			if (fullpath != NULL) {
425 				pve->pve_pathlen = strlen(fullpath) + 1;
426 				if (pve->pve_pathlen <= pathlen) {
427 					error = copyout(fullpath, pve->pve_path,
428 					    pve->pve_pathlen);
429 				} else
430 					error = ENAMETOOLONG;
431 			}
432 			if (freepath != NULL)
433 				free(freepath, M_TEMP);
434 		}
435 	}
436 
437 	return (error);
438 }
439 
440 #ifdef COMPAT_FREEBSD32
441 static int
442 ptrace_vm_entry32(struct thread *td, struct proc *p,
443     struct ptrace_vm_entry32 *pve32)
444 {
445 	struct ptrace_vm_entry pve;
446 	int error;
447 
448 	pve.pve_entry = pve32->pve_entry;
449 	pve.pve_pathlen = pve32->pve_pathlen;
450 	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;
451 
452 	error = ptrace_vm_entry(td, p, &pve);
453 	if (error == 0) {
454 		pve32->pve_entry = pve.pve_entry;
455 		pve32->pve_timestamp = pve.pve_timestamp;
456 		pve32->pve_start = pve.pve_start;
457 		pve32->pve_end = pve.pve_end;
458 		pve32->pve_offset = pve.pve_offset;
459 		pve32->pve_prot = pve.pve_prot;
460 		pve32->pve_fileid = pve.pve_fileid;
461 		pve32->pve_fsid = pve.pve_fsid;
462 	}
463 
464 	pve32->pve_pathlen = pve.pve_pathlen;
465 	return (error);
466 }
467 
468 static void
469 ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
470     struct ptrace_lwpinfo32 *pl32)
471 {
472 
473 	pl32->pl_lwpid = pl->pl_lwpid;
474 	pl32->pl_event = pl->pl_event;
475 	pl32->pl_flags = pl->pl_flags;
476 	pl32->pl_sigmask = pl->pl_sigmask;
477 	pl32->pl_siglist = pl->pl_siglist;
478 	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
479 	strcpy(pl32->pl_tdname, pl->pl_tdname);
480 	pl32->pl_child_pid = pl->pl_child_pid;
481 }
482 #endif /* COMPAT_FREEBSD32 */
483 
484 /*
485  * Process debugging system call.
486  */
487 #ifndef _SYS_SYSPROTO_H_
488 struct ptrace_args {
489 	int	req;
490 	pid_t	pid;
491 	caddr_t	addr;
492 	int	data;
493 };
494 #endif
495 
496 #ifdef COMPAT_FREEBSD32
497 /*
498  * This CPP subterfuge tries to reduce the number of ifdefs in the
499  * body of the code.
500  *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
501  * becomes either:
502  *   copyin(uap->addr, &r.reg, sizeof r.reg);
503  * or
504  *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
505  * .. except this is done at runtime.
506  */
507 #define	COPYIN(u, k, s)		wrap32 ? \
508 	copyin(u, k ## 32, s ## 32) : \
509 	copyin(u, k, s)
510 #define	COPYOUT(k, u, s)	wrap32 ? \
511 	copyout(k ## 32, u, s ## 32) : \
512 	copyout(k, u, s)
513 #else
514 #define	COPYIN(u, k, s)		copyin(u, k, s)
515 #define	COPYOUT(k, u, s)	copyout(k, u, s)
516 #endif
517 int
518 sys_ptrace(struct thread *td, struct ptrace_args *uap)
519 {
520 	/*
521 	 * XXX this obfuscation is to reduce stack usage, but the register
522 	 * structs may be too large to put on the stack anyway.
523 	 */
524 	union {
525 		struct ptrace_io_desc piod;
526 		struct ptrace_lwpinfo pl;
527 		struct ptrace_vm_entry pve;
528 		struct dbreg dbreg;
529 		struct fpreg fpreg;
530 		struct reg reg;
531 #ifdef COMPAT_FREEBSD32
532 		struct dbreg32 dbreg32;
533 		struct fpreg32 fpreg32;
534 		struct reg32 reg32;
535 		struct ptrace_io_desc32 piod32;
536 		struct ptrace_lwpinfo32 pl32;
537 		struct ptrace_vm_entry32 pve32;
538 #endif
539 	} r;
540 	void *addr;
541 	int error = 0;
542 #ifdef COMPAT_FREEBSD32
543 	int wrap32 = 0;
544 
545 	if (SV_CURPROC_FLAG(SV_ILP32))
546 		wrap32 = 1;
547 #endif
548 	AUDIT_ARG_PID(uap->pid);
549 	AUDIT_ARG_CMD(uap->req);
550 	AUDIT_ARG_VALUE(uap->data);
551 	addr = &r;
552 	switch (uap->req) {
553 	case PT_GETREGS:
554 	case PT_GETFPREGS:
555 	case PT_GETDBREGS:
556 	case PT_LWPINFO:
557 		break;
558 	case PT_SETREGS:
559 		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
560 		break;
561 	case PT_SETFPREGS:
562 		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
563 		break;
564 	case PT_SETDBREGS:
565 		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
566 		break;
567 	case PT_IO:
568 		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
569 		break;
570 	case PT_VM_ENTRY:
571 		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
572 		break;
573 	default:
574 		addr = uap->addr;
575 		break;
576 	}
577 	if (error)
578 		return (error);
579 
580 	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
581 	if (error)
582 		return (error);
583 
584 	switch (uap->req) {
585 	case PT_VM_ENTRY:
586 		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
587 		break;
588 	case PT_IO:
589 		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
590 		break;
591 	case PT_GETREGS:
592 		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
593 		break;
594 	case PT_GETFPREGS:
595 		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
596 		break;
597 	case PT_GETDBREGS:
598 		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
599 		break;
600 	case PT_LWPINFO:
601 		error = copyout(&r.pl, uap->addr, uap->data);
602 		break;
603 	}
604 
605 	return (error);
606 }
607 #undef COPYIN
608 #undef COPYOUT
609 
610 #ifdef COMPAT_FREEBSD32
611 /*
612  *   PROC_READ(regs, td2, addr);
613  * becomes either:
614  *   proc_read_regs(td2, addr);
615  * or
616  *   proc_read_regs32(td2, addr);
617  * .. except this is done at runtime.  There is an additional
618  * complication in that PROC_WRITE disallows 32 bit consumers
619  * from writing to 64 bit address space targets.
620  */
621 #define	PROC_READ(w, t, a)	wrap32 ? \
622 	proc_read_ ## w ## 32(t, a) : \
623 	proc_read_ ## w (t, a)
624 #define	PROC_WRITE(w, t, a)	wrap32 ? \
625 	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
626 	proc_write_ ## w (t, a)
627 #else
628 #define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
629 #define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
630 #endif
631 
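/*
 * Carry out a single ptrace(2) request against process pid (or LWP
 * tid) on behalf of the calling thread.
 */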
632 int
633 kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
634 {
635 	struct iovec iov;
636 	struct uio uio;
637 	struct proc *curp, *p, *pp;
638 	struct thread *td2 = NULL, *td3;
639 	struct ptrace_io_desc *piod = NULL;
640 	struct ptrace_lwpinfo *pl;
641 	int error, write, tmp, num;
642 	int proctree_locked = 0;
643 	lwpid_t tid = 0, *buf;
644 #ifdef COMPAT_FREEBSD32
645 	int wrap32 = 0, safe = 0;
646 	struct ptrace_io_desc32 *piod32 = NULL;
647 	struct ptrace_lwpinfo32 *pl32 = NULL;
648 	struct ptrace_lwpinfo plr;
649 #endif
650 
651 	curp = td->td_proc;
652 
653 	/* Lock proctree before locking the process. */
654 	switch (req) {
655 	case PT_TRACE_ME:
656 	case PT_ATTACH:
657 	case PT_STEP:
658 	case PT_CONTINUE:
659 	case PT_TO_SCE:
660 	case PT_TO_SCX:
661 	case PT_SYSCALL:
662 	case PT_FOLLOW_FORK:
663 	case PT_DETACH:
664 		sx_xlock(&proctree_lock);
665 		proctree_locked = 1;
666 		break;
667 	default:
668 		break;
669 	}
670 
671 	write = 0;
672 	if (req == PT_TRACE_ME) {
673 		p = td->td_proc;
674 		PROC_LOCK(p);
675 	} else {
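		/*
		 * Ids above PID_MAX name an LWP (thread) rather than a
		 * process; resolve the thread and operate on the process
		 * that owns it.
		 */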
676 		if (pid <= PID_MAX) {
677 			if ((p = pfind(pid)) == NULL) {
678 				if (proctree_locked)
679 					sx_xunlock(&proctree_lock);
680 				return (ESRCH);
681 			}
682 		} else {
683 			td2 = tdfind(pid, -1);
684 			if (td2 == NULL) {
685 				if (proctree_locked)
686 					sx_xunlock(&proctree_lock);
687 				return (ESRCH);
688 			}
689 			p = td2->td_proc;
690 			tid = pid;
691 			pid = p->p_pid;
692 		}
693 	}
694 	AUDIT_ARG_PROCESS(p);
695 
696 	if ((p->p_flag & P_WEXIT) != 0) {
697 		error = ESRCH;
698 		goto fail;
699 	}
700 	if ((error = p_cansee(td, p)) != 0)
701 		goto fail;
702 
703 	if ((error = p_candebug(td, p)) != 0)
704 		goto fail;
705 
706 	/*
707 	 * System processes can't be debugged.
708 	 */
709 	if ((p->p_flag & P_SYSTEM) != 0) {
710 		error = EINVAL;
711 		goto fail;
712 	}
713 
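	/*
	 * If no LWP was named, default to the thread that reported the
	 * ptrace stop (p_xthread), or else to the first thread in the
	 * process.
	 */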
714 	if (tid == 0) {
715 		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
716 			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
717 			td2 = p->p_xthread;
718 		} else {
719 			td2 = FIRST_THREAD_IN_PROC(p);
720 		}
721 		tid = td2->td_tid;
722 	}
723 
724 #ifdef COMPAT_FREEBSD32
725 	/*
726 	 * Test if we're a 32 bit client and what the target is.
727 	 * Set the wrap controls accordingly.
728 	 */
729 	if (SV_CURPROC_FLAG(SV_ILP32)) {
730 		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
731 			safe = 1;
732 		wrap32 = 1;
733 	}
734 #endif
735 	/*
736 	 * Permissions check
737 	 */
738 	switch (req) {
739 	case PT_TRACE_ME:
740 		/* Always legal. */
741 		break;
742 
743 	case PT_ATTACH:
744 		/* Self */
745 		if (p->p_pid == td->td_proc->p_pid) {
746 			error = EINVAL;
747 			goto fail;
748 		}
749 
750 		/* Already traced */
751 		if (p->p_flag & P_TRACED) {
752 			error = EBUSY;
753 			goto fail;
754 		}
755 
756 		/* Can't trace an ancestor if you're being traced. */
757 		if (curp->p_flag & P_TRACED) {
758 			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
759 				if (pp == p) {
760 					error = EINVAL;
761 					goto fail;
762 				}
763 			}
764 		}
765 
766 
767 		/* OK */
768 		break;
769 
770 	case PT_CLEARSTEP:
771 		/* Allow thread to clear single step for itself */
772 		if (td->td_tid == tid)
773 			break;
774 
775 		/* FALLTHROUGH */
776 	default:
777 		/* not being traced... */
778 		if ((p->p_flag & P_TRACED) == 0) {
779 			error = EPERM;
780 			goto fail;
781 		}
782 
783 		/* not being traced by YOU */
784 		if (p->p_pptr != td->td_proc) {
785 			error = EBUSY;
786 			goto fail;
787 		}
788 
789 		/* not currently stopped */
790 		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
791 		    p->p_suspcount != p->p_numthreads  ||
792 		    (p->p_flag & P_WAITED) == 0) {
793 			error = EBUSY;
794 			goto fail;
795 		}
796 
797 		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
798 			static int count = 0;
799 			if (count++ == 0)
800 				printf("P_STOPPED_TRACE not set.\n");
801 		}
802 
803 		/* OK */
804 		break;
805 	}
806 
807 	/* Keep this process around until we finish this request. */
808 	_PHOLD(p);
809 
810 #ifdef FIX_SSTEP
811 	/*
812 	 * Single step fixup ala procfs
813 	 */
814 	FIX_SSTEP(td2);
815 #endif
816 
817 	/*
818 	 * Actually do the requests
819 	 */
820 
821 	td->td_retval[0] = 0;
822 
823 	switch (req) {
824 	case PT_TRACE_ME:
825 		/* set my trace flag and "owner" so it can read/write me */
826 		p->p_flag |= P_TRACED;
827 		if (p->p_flag & P_PPWAIT)
828 			p->p_flag |= P_PPTRACE;
829 		p->p_oppid = p->p_pptr->p_pid;
830 		break;
831 
832 	case PT_ATTACH:
833 		/* security check done above */
834 		/*
835 		 * It would be nice if the tracing relationship was separate
836 		 * from the parent relationship but that would require
837 		 * another set of links in the proc struct or for "wait"
838 		 * to scan the entire proc table.  To make life easier,
839 		 * we just re-parent the process we're trying to trace.
840 		 * The old parent is remembered so we can put things back
841 		 * on a "detach".
842 		 */
843 		p->p_flag |= P_TRACED;
844 		p->p_oppid = p->p_pptr->p_pid;
845 		if (p->p_pptr != td->td_proc) {
846 			proc_reparent(p, td->td_proc);
847 		}
848 		data = SIGSTOP;
849 		goto sendsig;	/* in PT_CONTINUE below */
850 
851 	case PT_CLEARSTEP:
852 		error = ptrace_clear_single_step(td2);
853 		break;
854 
855 	case PT_SETSTEP:
856 		error = ptrace_single_step(td2);
857 		break;
858 
859 	case PT_SUSPEND:
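		/*
		 * Mark the thread for suspension; it stops at its next
		 * suspension check point rather than immediately.
		 */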
860 		td2->td_dbgflags |= TDB_SUSPEND;
861 		thread_lock(td2);
862 		td2->td_flags |= TDF_NEEDSUSPCHK;
863 		thread_unlock(td2);
864 		break;
865 
866 	case PT_RESUME:
867 		td2->td_dbgflags &= ~TDB_SUSPEND;
868 		break;
869 
870 	case PT_FOLLOW_FORK:
871 		if (data)
872 			p->p_flag |= P_FOLLOWFORK;
873 		else
874 			p->p_flag &= ~P_FOLLOWFORK;
875 		break;
876 
877 	case PT_STEP:
878 	case PT_CONTINUE:
879 	case PT_TO_SCE:
880 	case PT_TO_SCX:
881 	case PT_SYSCALL:
882 	case PT_DETACH:
883 		/* Zero means do not send any signal */
884 		if (data < 0 || data > _SIG_MAXSIG) {
885 			error = EINVAL;
886 			break;
887 		}
888 
889 		switch (req) {
890 		case PT_STEP:
891 			error = ptrace_single_step(td2);
892 			if (error)
893 				goto out;
894 			break;
895 		case PT_CONTINUE:
896 		case PT_TO_SCE:
897 		case PT_TO_SCX:
898 		case PT_SYSCALL:
899 			if (addr != (void *)1) {
900 				error = ptrace_set_pc(td2,
901 				    (u_long)(uintfptr_t)addr);
902 				if (error)
903 					goto out;
904 			}
905 			switch (req) {
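			/*
			 * Arrange to stop at syscall entry, syscall exit,
			 * or both before resuming the process.
			 */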
906 			case PT_TO_SCE:
907 				p->p_stops |= S_PT_SCE;
908 				break;
909 			case PT_TO_SCX:
910 				p->p_stops |= S_PT_SCX;
911 				break;
912 			case PT_SYSCALL:
913 				p->p_stops |= S_PT_SCE | S_PT_SCX;
914 				break;
915 			}
916 			break;
917 		case PT_DETACH:
918 			/* reset process parent */
919 			if (p->p_oppid != p->p_pptr->p_pid) {
920 				struct proc *pp;
921 
922 				PROC_LOCK(p->p_pptr);
923 				sigqueue_take(p->p_ksi);
924 				PROC_UNLOCK(p->p_pptr);
925 
926 				PROC_UNLOCK(p);
927 				pp = pfind(p->p_oppid);
928 				if (pp == NULL)
929 					pp = initproc;
930 				else
931 					PROC_UNLOCK(pp);
932 				PROC_LOCK(p);
933 				proc_reparent(p, pp);
934 				if (pp == initproc)
935 					p->p_sigparent = SIGCHLD;
936 			}
937 			p->p_oppid = 0;
938 			p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK);
939 
940 			/* should we send SIGCHLD? */
941 			/* childproc_continued(p); */
942 			break;
943 		}
944 
945 	sendsig:
946 		if (proctree_locked) {
947 			sx_xunlock(&proctree_lock);
948 			proctree_locked = 0;
949 		}
950 		p->p_xstat = data;
951 		p->p_xthread = NULL;
952 		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
953 			/* deliver or queue signal */
954 			td2->td_dbgflags &= ~TDB_XSIG;
955 			td2->td_xsig = data;
956 
957 			if (req == PT_DETACH) {
958 				FOREACH_THREAD_IN_PROC(p, td3)
959 					td3->td_dbgflags &= ~TDB_SUSPEND;
960 			}
961 			/*
962 			 * Unsuspend all threads.  To keep a thread from
963 			 * running, suspend it with PT_SUSPEND before
964 			 * continuing the process.
965 			 */
966 			PROC_SLOCK(p);
967 			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
968 			thread_unsuspend(p);
969 			PROC_SUNLOCK(p);
970 			if (req == PT_ATTACH)
971 				kern_psignal(p, data);
972 		} else {
973 			if (data)
974 				kern_psignal(p, data);
975 		}
976 		break;
977 
978 	case PT_WRITE_I:
979 	case PT_WRITE_D:
980 		td2->td_dbgflags |= TDB_USERWR;
981 		write = 1;
982 		/* FALLTHROUGH */
983 	case PT_READ_I:
984 	case PT_READ_D:
985 		PROC_UNLOCK(p);
986 		tmp = 0;
987 		/* write = 0 set above */
988 		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
989 		iov.iov_len = sizeof(int);
990 		uio.uio_iov = &iov;
991 		uio.uio_iovcnt = 1;
992 		uio.uio_offset = (off_t)(uintptr_t)addr;
993 		uio.uio_resid = sizeof(int);
994 		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
995 		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
996 		uio.uio_td = td;
997 		error = proc_rwmem(p, &uio);
998 		if (uio.uio_resid != 0) {
999 			/*
1000 			 * XXX proc_rwmem() doesn't currently return ENOSPC,
1001 			 * so I think write() can bogusly return 0.
1002 			 * XXX what happens for short writes?  We don't want
1003 			 * to write partial data.
1004 			 * XXX proc_rwmem() returns EPERM for other invalid
1005 			 * addresses.  Convert this to EINVAL.  Does this
1006 			 * clobber returns of EPERM for other reasons?
1007 			 */
1008 			if (error == 0 || error == ENOSPC || error == EPERM)
1009 				error = EINVAL;	/* EOF */
1010 		}
1011 		if (!write)
1012 			td->td_retval[0] = tmp;
1013 		PROC_LOCK(p);
1014 		break;
1015 
1016 	case PT_IO:
1017 #ifdef COMPAT_FREEBSD32
1018 		if (wrap32) {
1019 			piod32 = addr;
1020 			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
1021 			iov.iov_len = piod32->piod_len;
1022 			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
1023 			uio.uio_resid = piod32->piod_len;
1024 		} else
1025 #endif
1026 		{
1027 			piod = addr;
1028 			iov.iov_base = piod->piod_addr;
1029 			iov.iov_len = piod->piod_len;
1030 			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
1031 			uio.uio_resid = piod->piod_len;
1032 		}
1033 		uio.uio_iov = &iov;
1034 		uio.uio_iovcnt = 1;
1035 		uio.uio_segflg = UIO_USERSPACE;
1036 		uio.uio_td = td;
1037 #ifdef COMPAT_FREEBSD32
1038 		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
1039 #else
1040 		tmp = piod->piod_op;
1041 #endif
1042 		switch (tmp) {
1043 		case PIOD_READ_D:
1044 		case PIOD_READ_I:
1045 			uio.uio_rw = UIO_READ;
1046 			break;
1047 		case PIOD_WRITE_D:
1048 		case PIOD_WRITE_I:
1049 			td2->td_dbgflags |= TDB_USERWR;
1050 			uio.uio_rw = UIO_WRITE;
1051 			break;
1052 		default:
1053 			error = EINVAL;
1054 			goto out;
1055 		}
1056 		PROC_UNLOCK(p);
1057 		error = proc_rwmem(p, &uio);
1058 #ifdef COMPAT_FREEBSD32
1059 		if (wrap32)
1060 			piod32->piod_len -= uio.uio_resid;
1061 		else
1062 #endif
1063 			piod->piod_len -= uio.uio_resid;
1064 		PROC_LOCK(p);
1065 		break;
1066 
1067 	case PT_KILL:
1068 		data = SIGKILL;
1069 		goto sendsig;	/* in PT_CONTINUE above */
1070 
1071 	case PT_SETREGS:
1072 		td2->td_dbgflags |= TDB_USERWR;
1073 		error = PROC_WRITE(regs, td2, addr);
1074 		break;
1075 
1076 	case PT_GETREGS:
1077 		error = PROC_READ(regs, td2, addr);
1078 		break;
1079 
1080 	case PT_SETFPREGS:
1081 		td2->td_dbgflags |= TDB_USERWR;
1082 		error = PROC_WRITE(fpregs, td2, addr);
1083 		break;
1084 
1085 	case PT_GETFPREGS:
1086 		error = PROC_READ(fpregs, td2, addr);
1087 		break;
1088 
1089 	case PT_SETDBREGS:
1090 		td2->td_dbgflags |= TDB_USERWR;
1091 		error = PROC_WRITE(dbregs, td2, addr);
1092 		break;
1093 
1094 	case PT_GETDBREGS:
1095 		error = PROC_READ(dbregs, td2, addr);
1096 		break;
1097 
1098 	case PT_LWPINFO:
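		/*
		 * data is the size of the lwpinfo structure supplied by
		 * the caller; it bounds how much is filled in and later
		 * copied out.
		 */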
1099 		if (data <= 0 ||
1100 #ifdef COMPAT_FREEBSD32
1101 		    (!wrap32 && data > sizeof(*pl)) ||
1102 		    (wrap32 && data > sizeof(*pl32))) {
1103 #else
1104 		    data > sizeof(*pl)) {
1105 #endif
1106 			error = EINVAL;
1107 			break;
1108 		}
1109 #ifdef COMPAT_FREEBSD32
1110 		if (wrap32) {
1111 			pl = &plr;
1112 			pl32 = addr;
1113 		} else
1114 #endif
1115 		pl = addr;
1116 		pl->pl_lwpid = td2->td_tid;
1117 		pl->pl_event = PL_EVENT_NONE;
1118 		pl->pl_flags = 0;
1119 		if (td2->td_dbgflags & TDB_XSIG) {
1120 			pl->pl_event = PL_EVENT_SIGNAL;
1121 			if (td2->td_dbgksi.ksi_signo != 0 &&
1122 #ifdef COMPAT_FREEBSD32
1123 			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
1124 			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
1125 			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
1126 			    pl_siginfo) + sizeof(struct siginfo32)))
1127 #else
1128 			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
1129 			    + sizeof(pl->pl_siginfo)
1130 #endif
1131 			){
1132 				pl->pl_flags |= PL_FLAG_SI;
1133 				pl->pl_siginfo = td2->td_dbgksi.ksi_info;
1134 			}
1135 		}
1136 		if ((pl->pl_flags & PL_FLAG_SI) == 0)
1137 			bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
1138 		if (td2->td_dbgflags & TDB_SCE)
1139 			pl->pl_flags |= PL_FLAG_SCE;
1140 		else if (td2->td_dbgflags & TDB_SCX)
1141 			pl->pl_flags |= PL_FLAG_SCX;
1142 		if (td2->td_dbgflags & TDB_EXEC)
1143 			pl->pl_flags |= PL_FLAG_EXEC;
1144 		if (td2->td_dbgflags & TDB_FORK) {
1145 			pl->pl_flags |= PL_FLAG_FORKED;
1146 			pl->pl_child_pid = td2->td_dbg_forked;
1147 		}
1148 		if (td2->td_dbgflags & TDB_CHILD)
1149 			pl->pl_flags |= PL_FLAG_CHILD;
1150 		pl->pl_sigmask = td2->td_sigmask;
1151 		pl->pl_siglist = td2->td_siglist;
1152 		strcpy(pl->pl_tdname, td2->td_name);
1153 #ifdef COMPAT_FREEBSD32
1154 		if (wrap32)
1155 			ptrace_lwpinfo_to32(pl, pl32);
1156 #endif
1157 		break;
1158 
1159 	case PT_GETNUMLWPS:
1160 		td->td_retval[0] = p->p_numthreads;
1161 		break;
1162 
1163 	case PT_GETLWPLIST:
1164 		if (data <= 0) {
1165 			error = EINVAL;
1166 			break;
1167 		}
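		/*
		 * The process lock is dropped around the allocation, so
		 * the thread count may change; copy out at most num ids.
		 */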
1168 		num = imin(p->p_numthreads, data);
1169 		PROC_UNLOCK(p);
1170 		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
1171 		tmp = 0;
1172 		PROC_LOCK(p);
1173 		FOREACH_THREAD_IN_PROC(p, td2) {
1174 			if (tmp >= num)
1175 				break;
1176 			buf[tmp++] = td2->td_tid;
1177 		}
1178 		PROC_UNLOCK(p);
1179 		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
1180 		free(buf, M_TEMP);
1181 		if (!error)
1182 			td->td_retval[0] = tmp;
1183 		PROC_LOCK(p);
1184 		break;
1185 
1186 	case PT_VM_TIMESTAMP:
1187 		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
1188 		break;
1189 
1190 	case PT_VM_ENTRY:
1191 		PROC_UNLOCK(p);
1192 #ifdef COMPAT_FREEBSD32
1193 		if (wrap32)
1194 			error = ptrace_vm_entry32(td, p, addr);
1195 		else
1196 #endif
1197 		error = ptrace_vm_entry(td, p, addr);
1198 		PROC_LOCK(p);
1199 		break;
1200 
1201 	default:
1202 #ifdef __HAVE_PTRACE_MACHDEP
1203 		if (req >= PT_FIRSTMACH) {
1204 			PROC_UNLOCK(p);
1205 			error = cpu_ptrace(td2, req, addr, data);
1206 			PROC_LOCK(p);
1207 		} else
1208 #endif
1209 			/* Unknown request. */
1210 			error = EINVAL;
1211 		break;
1212 	}
1213 
1214 out:
1215 	/* Drop our hold on this process now that the request has completed. */
1216 	_PRELE(p);
1217 fail:
1218 	PROC_UNLOCK(p);
1219 	if (proctree_locked)
1220 		sx_xunlock(&proctree_lock);
1221 	return (error);
1222 }
1223 #undef PROC_READ
1224 #undef PROC_WRITE
1225 
1226 /*
1227  * Stop a process because of a debugging event;
1228  * stay stopped until p->p_step is cleared
1229  * (cleared by PIOCCONT in procfs).
1230  */
1231 void
1232 stopevent(struct proc *p, unsigned int event, unsigned int val)
1233 {
1234 
1235 	PROC_LOCK_ASSERT(p, MA_OWNED);
1236 	p->p_step = 1;
1237 	do {
1238 		p->p_xstat = val;
1239 		p->p_xthread = NULL;
1240 		p->p_stype = event;	/* Which event caused the stop? */
1241 		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
1242 		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
1243 	} while (p->p_step);
1244 }
1245 
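/*
 * Set or clear P_PROTECTED on a single process for PROC_SPROTECT.
 * Returns 1 if the process was adjusted, 0 if it was skipped (system
 * processes and processes the caller cannot see).
 */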
1246 static int
1247 protect_setchild(struct thread *td, struct proc *p, int flags)
1248 {
1249 
1250 	PROC_LOCK_ASSERT(p, MA_OWNED);
1251 	if (p->p_flag & P_SYSTEM || p_cansee(td, p) != 0)
1252 		return (0);
1253 	if (flags & PPROT_SET) {
1254 		p->p_flag |= P_PROTECTED;
1255 		if (flags & PPROT_INHERIT)
1256 			p->p_flag2 |= P2_INHERIT_PROTECTED;
1257 	} else {
1258 		p->p_flag &= ~P_PROTECTED;
1259 		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
1260 	}
1261 	return (1);
1262 }
1263 
1264 static int
1265 protect_setchildren(struct thread *td, struct proc *top, int flags)
1266 {
1267 	struct proc *p;
1268 	int ret;
1269 
1270 	p = top;
1271 	ret = 0;
1272 	sx_assert(&proctree_lock, SX_LOCKED);
1273 	for (;;) {
1274 		ret |= protect_setchild(td, p, flags);
1275 		PROC_UNLOCK(p);
1276 		/*
1277 		 * If this process has children, descend to them next,
1278 		 * otherwise do any siblings, and if done with this level,
1279 		 * follow back up the tree (but not past top).
1280 		 */
1281 		if (!LIST_EMPTY(&p->p_children))
1282 			p = LIST_FIRST(&p->p_children);
1283 		else for (;;) {
1284 			if (p == top) {
1285 				PROC_LOCK(p);
1286 				return (ret);
1287 			}
1288 			if (LIST_NEXT(p, p_sibling)) {
1289 				p = LIST_NEXT(p, p_sibling);
1290 				break;
1291 			}
1292 			p = p->p_pptr;
1293 		}
1294 		PROC_LOCK(p);
1295 	}
1296 }
1297 
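/*
 * Handle PROC_SPROTECT for one process: validate the flags, check for
 * the required privilege, and apply the change to the process and,
 * with PPROT_DESCEND, to its descendants as well.
 */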
1298 static int
1299 protect_set(struct thread *td, struct proc *p, int flags)
1300 {
1301 	int error, ret;
1302 
1303 	switch (PPROT_OP(flags)) {
1304 	case PPROT_SET:
1305 	case PPROT_CLEAR:
1306 		break;
1307 	default:
1308 		return (EINVAL);
1309 	}
1310 
1311 	if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
1312 		return (EINVAL);
1313 
1314 	error = priv_check(td, PRIV_VM_MADV_PROTECT);
1315 	if (error)
1316 		return (error);
1317 
1318 	if (flags & PPROT_DESCEND)
1319 		ret = protect_setchildren(td, p, flags);
1320 	else
1321 		ret = protect_setchild(td, p, flags);
1322 	if (ret == 0)
1323 		return (EPERM);
1324 	return (0);
1325 }
1326 
1327 #ifndef _SYS_SYSPROTO_H_
1328 struct procctl_args {
1329 	idtype_t idtype;
1330 	id_t	id;
1331 	int	com;
1332 	void	*data;
1333 };
1334 #endif
1335 /* ARGSUSED */
1336 int
1337 sys_procctl(struct thread *td, struct procctl_args *uap)
1338 {
1339 	int error, flags;
1340 	void *data;
1341 
1342 	switch (uap->com) {
1343 	case PROC_SPROTECT:
1344 		error = copyin(uap->data, &flags, sizeof(flags));
1345 		if (error)
1346 			return (error);
1347 		data = &flags;
1348 		break;
1349 	default:
1350 		return (EINVAL);
1351 	}
1352 
1353 	return (kern_procctl(td, uap->idtype, uap->id, uap->com, data));
1354 }
1355 
1356 static int
1357 kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
1358 {
1359 
1360 	PROC_LOCK_ASSERT(p, MA_OWNED);
1361 	switch (com) {
1362 	case PROC_SPROTECT:
1363 		return (protect_set(td, p, *(int *)data));
1364 	default:
1365 		return (EINVAL);
1366 	}
1367 }
1368 
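/*
 * Apply a procctl(2) command to a single process (P_PID) or to every
 * visible member of a process group (P_PGID).
 */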
1369 int
1370 kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
1371 {
1372 	struct pgrp *pg;
1373 	struct proc *p;
1374 	int error, first_error, ok;
1375 
1376 	sx_slock(&proctree_lock);
1377 	switch (idtype) {
1378 	case P_PID:
1379 		p = pfind(id);
1380 		if (p == NULL) {
1381 			error = ESRCH;
1382 			break;
1383 		}
1384 		if (p->p_state == PRS_NEW)
1385 			error = ESRCH;
1386 		else
1387 			error = p_cansee(td, p);
1388 		if (error == 0)
1389 			error = kern_procctl_single(td, p, com, data);
1390 		PROC_UNLOCK(p);
1391 		break;
1392 	case P_PGID:
1393 		/*
1394 		 * Attempt to apply the operation to all members of the
1395 		 * group.  Ignore processes in the group that can't be
1396 		 * seen.  Ignore errors so long as at least one process is
1397 		 * able to complete the request successfully.
1398 		 */
1399 		pg = pgfind(id);
1400 		if (pg == NULL) {
1401 			error = ESRCH;
1402 			break;
1403 		}
1404 		PGRP_UNLOCK(pg);
1405 		ok = 0;
1406 		first_error = 0;
1407 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
1408 			PROC_LOCK(p);
1409 			if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
1410 				PROC_UNLOCK(p);
1411 				continue;
1412 			}
1413 			error = kern_procctl_single(td, p, com, data);
1414 			PROC_UNLOCK(p);
1415 			if (error == 0)
1416 				ok = 1;
1417 			else if (first_error == 0)
1418 				first_error = error;
1419 		}
1420 		if (ok)
1421 			error = 0;
1422 		else if (first_error != 0)
1423 			error = first_error;
1424 		else
1425 			/*
1426 			 * Was not able to see any processes in the
1427 			 * process group.
1428 			 */
1429 			error = ESRCH;
1430 		break;
1431 	default:
1432 		error = EINVAL;
1433 		break;
1434 	}
1435 	sx_sunlock(&proctree_lock);
1436 	return (error);
1437 }
1438