xref: /freebsd/sys/kern/sys_process.c (revision d086ded32300bc0f33fb1574d0bcfccfbc60881d)
1 /*
2  * Copyright (c) 1994, Sean Eric Fagan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Sean Eric Fagan.
16  * 4. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/syscallsubr.h>
40 #include <sys/sysproto.h>
41 #include <sys/proc.h>
42 #include <sys/vnode.h>
43 #include <sys/ptrace.h>
44 #include <sys/sx.h>
45 #include <sys/user.h>
46 
47 #include <machine/reg.h>
48 
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51 #include <vm/vm_extern.h>
52 #include <vm/vm_map.h>
53 #include <vm/vm_kern.h>
54 #include <vm/vm_object.h>
55 #include <vm/vm_page.h>
56 
57 /*
58  * Functions implemented using PROC_ACTION():
59  *
60  * proc_read_regs(proc, regs)
61  *	Get the current user-visible register set from the process
62  *	and copy it into the regs structure (<machine/reg.h>).
63  *	The process is stopped at the time read_regs is called.
64  *
65  * proc_write_regs(proc, regs)
66  *	Update the current register set from the passed in regs
67  *	structure.  Take care to avoid clobbering special CPU
68  *	registers or privileged bits in the PSL.
69  *	Depending on the architecture this may have fix-up work to do,
70  *	especially if the IAR or PCW are modified.
71  *	The process is stopped at the time write_regs is called.
72  *
73  * proc_read_fpregs, proc_write_fpregs
74  *	deal with the floating point register set, otherwise as above.
75  *
76  * proc_read_dbregs, proc_write_dbregs
77  *	deal with the processor debug register set, otherwise as above.
78  *
79  * proc_sstep(proc)
80  *	Arrange for the process to trap after executing a single instruction.
81  */
82 
/*
 * Shared body for the proc_*_regs()/proc_sstep() accessors below:
 * assert that the calling thread's process is locked, fail with EIO
 * if the process is swapped out (PS_INMEM clear), otherwise run
 * "action", which must evaluate to an errno value.  NOTE: the macro
 * contains a hidden "return" — it terminates the calling function.
 */
#define	PROC_ACTION(action) do {					\
	int error;							\
									\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);			\
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)			\
		error = EIO;						\
	else								\
		error = (action);					\
	return (error);							\
} while(0)
93 
94 int
95 proc_read_regs(struct thread *td, struct reg *regs)
96 {
97 
98 	PROC_ACTION(fill_regs(td, regs));
99 }
100 
101 int
102 proc_write_regs(struct thread *td, struct reg *regs)
103 {
104 
105 	PROC_ACTION(set_regs(td, regs));
106 }
107 
108 int
109 proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
110 {
111 
112 	PROC_ACTION(fill_dbregs(td, dbregs));
113 }
114 
115 int
116 proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
117 {
118 
119 	PROC_ACTION(set_dbregs(td, dbregs));
120 }
121 
122 /*
123  * Ptrace doesn't support fpregs at all, and there are no security holes
124  * or translations for fpregs, so we can just copy them.
125  */
126 int
127 proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
128 {
129 
130 	PROC_ACTION(fill_fpregs(td, fpregs));
131 }
132 
133 int
134 proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
135 {
136 
137 	PROC_ACTION(set_fpregs(td, fpregs));
138 }
139 
140 int
141 proc_sstep(struct thread *td)
142 {
143 
144 	PROC_ACTION(ptrace_single_step(td));
145 }
146 
/*
 * Transfer data between the kernel and the address space of process p,
 * as described by uio (direction, target address in uio_offset, and
 * residual count).  Works one page at a time: fault the page in the
 * target map, look it up, wire it, map it at a temporary kernel VA and
 * uiomove() through that window.  Caller must hold Giant.  Returns 0
 * or an errno value; on a short transfer uio->uio_resid is non-zero.
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;
	int error, writing;

	GIANT_REQUIRED;

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT))
		return (EFAULT);
	if (vm->vm_refcnt < 1)
		return (EFAULT);
	/*
	 * NOTE(review): vm_refcnt is read and bumped without an explicit
	 * lock here — presumably serialized by Giant; confirm.
	 */
	++vm->vm_refcnt;
	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	/* VM_PROT_OVERRIDE_WRITE lets the debugger write read-only text. */
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	/* Scratch kernel VA used as a one-page mapping window. */
	kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);

		if (error) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			/* Hand-over-hand: drop the front object's lock. */
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			vm_map_lookup_done(tmap, out_entry);

			break;
		}

		/*
		 * Wire the page into memory
		 */
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 * But reference the object first, so that we won't loose
		 * it.
		 */
		vm_object_reference(object);
		vm_map_lookup_done(tmap, out_entry);

		/* Map the wired page at the scratch kernel VA. */
		pmap_qenter(kva, &m, 1);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove((caddr_t)(kva + page_offset), len, uio);

		pmap_qremove(kva, 1);

		/*
		 * release the page and the object
		 */
		vm_page_lock_queues();
		vm_page_unwire(m, 1);
		vm_page_unlock_queues();
		vm_object_deallocate(object);

		object = NULL;

	} while (error == 0 && uio->uio_resid > 0);

	/* Drop a straggling object reference from an aborted iteration. */
	if (object)
		vm_object_deallocate(object);

	kmem_free(kernel_map, kva, PAGE_SIZE);
	vmspace_free(vm);
	return (error);
}
312 
313 /*
314  * Process debugging system call.
315  */
#ifndef _SYS_SYSPROTO_H_
/* User-supplied arguments to the ptrace(2) system call. */
struct ptrace_args {
	int	req;	/* request code (PT_*) */
	pid_t	pid;	/* target process id */
	caddr_t	addr;	/* request-specific address argument */
	int	data;	/* request-specific scalar (e.g. signal number) */
};
#endif
324 
325 /*
326  * MPSAFE
327  */
328 int
329 ptrace(struct thread *td, struct ptrace_args *uap)
330 {
331 	/*
332 	 * XXX this obfuscation is to reduce stack usage, but the register
333 	 * structs may be too large to put on the stack anyway.
334 	 */
335 	union {
336 		struct ptrace_io_desc piod;
337 		struct dbreg dbreg;
338 		struct fpreg fpreg;
339 		struct reg reg;
340 	} r;
341 	void *addr;
342 	int error = 0;
343 
344 	addr = &r;
345 	switch (uap->req) {
346 	case PT_GETREGS:
347 	case PT_GETFPREGS:
348 	case PT_GETDBREGS:
349 		break;
350 	case PT_SETREGS:
351 		error = copyin(uap->addr, &r.reg, sizeof r.reg);
352 		break;
353 	case PT_SETFPREGS:
354 		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
355 		break;
356 	case PT_SETDBREGS:
357 		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
358 		break;
359 	case PT_IO:
360 		error = copyin(uap->addr, &r.piod, sizeof r.piod);
361 		break;
362 	default:
363 		addr = uap->addr;
364 	}
365 	if (error)
366 		return (error);
367 
368 	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
369 	if (error)
370 		return (error);
371 
372 	switch (uap->req) {
373 	case PT_IO:
374 		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
375 		break;
376 	case PT_GETREGS:
377 		error = copyout(&r.reg, uap->addr, sizeof r.reg);
378 		break;
379 	case PT_GETFPREGS:
380 		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
381 		break;
382 	case PT_GETDBREGS:
383 		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
384 		break;
385 	}
386 
387 	return (error);
388 }
389 
/*
 * Backend for ptrace(2): validate request "req" against the process
 * identified by "pid" and carry it out.  "addr" is either a kernel copy
 * of the request-specific structure (register sets, I/O descriptor) or
 * the raw user pointer, depending on the request; "data" is the
 * request-specific scalar (e.g. the signal number for PT_CONTINUE).
 * Returns 0 or an errno value; read results for PT_READ_* land in
 * td->td_retval[0].
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2;
	struct ptrace_io_desc *piod;
	int error, write, tmp;
	int proctree_locked = 0;

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		/* pfind() returns the target process locked. */
		if ((p = pfind(pid)) == NULL) {
			if (proctree_locked)
				sx_xunlock(&proctree_lock);
			return (ESRCH);
		}
	}
	/* From here on, "fail:" unlocks p (and proctree if held). */
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
	case PT_GETREGS:
	case PT_SETREGS:
	case PT_GETFPREGS:
	case PT_SETFPREGS:
	case PT_GETDBREGS:
	case PT_SETDBREGS:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;

	default:
		error = EINVAL;
		goto fail;
	}

	/* Operate on the target's first thread. */
	td2 = FIRST_THREAD_IN_PROC(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);			/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		/* Remember the original parent so PT_DETACH can restore it. */
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* XXX data is used even in the PT_STEP case. */
		if (req != PT_STEP && (unsigned)data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		/* Hold the process in memory while touching machine state. */
		_PHOLD(p);

		if (req == PT_STEP) {
			error = ptrace_single_step(td2);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}

		/* addr == (void *)1 means "resume at the current PC". */
		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}
		_PRELE(p);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				/* NB: shadows the outer "pp". */
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
		if (P_SHOULDSTOP(p)) {
			/* Stopped: stash the signal and wake the process up. */
			p->p_xstat = data;
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			setrunnable(td2);	/* XXXKSE */
			/* Need foreach kse in proc, ... make_kse_queued(). */
			mtx_unlock_spin(&sched_lock);
		} else if (data)
			psignal(p, data);
		PROC_UNLOCK(p);

		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		/* proc_rwmem() requires Giant. */
		mtx_lock(&Giant);
		error = proc_rwmem(p, &uio);
		mtx_unlock(&Giant);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		return (error);

	case PT_IO:
		PROC_UNLOCK(p);
		/* addr is a kernel copy of the descriptor (see ptrace()). */
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		mtx_lock(&Giant);
		error = proc_rwmem(p, &uio);
		mtx_unlock(&Giant);
		/* Report the number of bytes actually transferred. */
		piod->piod_len -= uio.uio_resid;
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = proc_write_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = proc_read_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = proc_write_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = proc_read_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = proc_write_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = proc_read_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	default:
		KASSERT(0, ("unreachable code\n"));
		break;
	}

	KASSERT(0, ("unreachable code\n"));
	return (0);

fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
743 
744 /*
745  * Stop a process because of a debugging event;
746  * stay stopped until p->p_step is cleared
747  * (cleared by PIOCCONT in procfs).
748  */
749 void
750 stopevent(struct proc *p, unsigned int event, unsigned int val)
751 {
752 
753 	PROC_LOCK_ASSERT(p, MA_OWNED);
754 	p->p_step = 1;
755 	do {
756 		p->p_xstat = val;
757 		p->p_stype = event;	/* Which event caused the stop? */
758 		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
759 		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
760 	} while (p->p_step);
761 }
762