xref: /freebsd/sys/kern/sys_process.c (revision 77b7cdf1999ee965ad494fddd184b18f532ac91a)
/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/user.h>

#include <machine/reg.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(td, regs)
 *	Get the current user-visible register set from the thread's
 *	process and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(td, regs)
 *	Update the current register set from the passed-in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(td)
 *	Arrange for the process to trap after executing a single instruction.
 */
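/*
 * Illustrative usage sketch (editorial addition, not original code from
 * this file): "p" is a hypothetical locked target process.  The sequence
 * below mirrors what kern_ptrace() does for PT_GETREGS, holding the
 * process across the register read:
 *
 *	struct reg r;
 *	int error;
 *
 *	PROC_LOCK(p);
 *	_PHOLD(p);
 *	error = proc_read_regs(FIRST_THREAD_IN_PROC(p), &r);
 *	_PRELE(p);
 *	PROC_UNLOCK(p);
 */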

#define	PROC_ACTION(action) do {					\
	int error;							\
									\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);			\
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)			\
		error = EIO;						\
	else								\
		error = (action);					\
	return (error);							\
} while (0)
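/*
 * For illustration (editorial note): PROC_ACTION() supplies the whole
 * function body, including the return, so proc_read_regs() below expands
 * to roughly the following:
 *
 *	int error;
 *
 *	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 *	if ((td->td_proc->p_sflag & PS_INMEM) == 0)
 *		error = EIO;
 *	else
 *		error = (fill_regs(td, regs));
 *	return (error);
 */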

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;
	int error, writing;

	GIANT_REQUIRED;

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT))
		return (EFAULT);
	if (vm->vm_refcnt < 1)
		return (EFAULT);
	++vm->vm_refcnt;
	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, and
		 * wired aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);

		if (error) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			break;
		}

		m = vm_page_lookup(object, pindex);

		/* Allow fallback to backing objects if we are reading */

		while (m == NULL && !writing && object->backing_object) {

			pindex += OFF_TO_IDX(object->backing_object_offset);
			object = object->backing_object;

			m = vm_page_lookup(object, pindex);
		}

		if (m == NULL) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			vm_map_lookup_done(tmap, out_entry);

			break;
		}

		/*
		 * Wire the page into memory
		 */
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 * But reference the object first, so that we won't lose
		 * it.
		 */
		vm_object_reference(object);
		vm_map_lookup_done(tmap, out_entry);

		pmap_qenter(kva, &m, 1);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove((caddr_t)(kva + page_offset), len, uio);

		pmap_qremove(kva, 1);

		/*
		 * release the page and the object
		 */
		vm_page_lock_queues();
		vm_page_unwire(m, 1);
		vm_page_unlock_queues();
		vm_object_deallocate(object);

		object = NULL;

	} while (error == 0 && uio->uio_resid > 0);

	if (object)
		vm_object_deallocate(object);

	kmem_free(kernel_map, kva, PAGE_SIZE);
	vmspace_free(vm);
	return (error);
}

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif
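/*
 * Userland usage sketch (editorial addition; "pid", "status" and "regs"
 * are hypothetical names assumed to be in scope, and error handling is
 * abbreviated).  A debugger typically drives this syscall roughly as
 * follows: PT_ATTACH stops the target with SIGSTOP, and PT_DETACH resumes
 * it at its current program counter (addr of (caddr_t)1):
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <machine/reg.h>
 *	#include <err.h>
 *
 *	struct reg regs;
 *	int status;
 *
 *	if (ptrace(PT_ATTACH, pid, (caddr_t)0, 0) == -1)
 *		err(1, "PT_ATTACH");
 *	if (waitpid(pid, &status, 0) == -1)
 *		err(1, "waitpid");
 *	if (ptrace(PT_GETREGS, pid, (caddr_t)&regs, 0) == -1)
 *		err(1, "PT_GETREGS");
 *	if (ptrace(PT_DETACH, pid, (caddr_t)1, 0) == -1)
 *		err(1, "PT_DETACH");
 */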

/*
 * MPSAFE
 */
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	}

	return (error);
}

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2;
	struct ptrace_io_desc *piod;
	int error, write, tmp;
	int proctree_locked = 0;

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if ((p = pfind(pid)) == NULL) {
			if (proctree_locked)
				sx_xunlock(&proctree_lock);
			return (ESRCH);
		}
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
	case PT_GETREGS:
	case PT_SETREGS:
	case PT_GETFPREGS:
	case PT_SETFPREGS:
	case PT_GETDBREGS:
	case PT_SETDBREGS:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;

	default:
		error = EINVAL;
		goto fail;
	}

	td2 = FIRST_THREAD_IN_PROC(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);			/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* XXX data is used even in the PT_STEP case. */
		if (req != PT_STEP && (unsigned)data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		_PHOLD(p);

		if (req == PT_STEP) {
			error = ptrace_single_step(td2);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}

		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}
		_PRELE(p);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
		if (P_SHOULDSTOP(p)) {
			p->p_xstat = data;
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			setrunnable(td2);	/* XXXKSE */
			/* Need foreach kse in proc, ... make_kse_queued(). */
			mtx_unlock_spin(&sched_lock);
		} else if (data)
			psignal(p, data);
		PROC_UNLOCK(p);

		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		mtx_lock(&Giant);
		error = proc_rwmem(p, &uio);
		mtx_unlock(&Giant);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		return (error);

	case PT_IO:
		PROC_UNLOCK(p);
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		mtx_lock(&Giant);
		error = proc_rwmem(p, &uio);
		mtx_unlock(&Giant);
		piod->piod_len -= uio.uio_resid;
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = proc_write_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = proc_read_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = proc_write_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = proc_read_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = proc_write_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = proc_read_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	default:
		KASSERT(0, ("unreachable code\n"));
		break;
	}

	KASSERT(0, ("unreachable code\n"));
	return (0);

fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
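/*
 * Editorial sketch of the continue side (an assumption based on the
 * comment above, not code from this file): the procfs PIOCCONT path is
 * expected to release the sleeper by clearing p_step and waking it,
 * roughly:
 *
 *	PROC_LOCK(p);
 *	p->p_step = 0;
 *	wakeup(&p->p_step);
 *	PROC_UNLOCK(p);
 */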