/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user-generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, at
 * the next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
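
/*
 * For reference, a minimal userland sketch of driving this facility (the
 * file name is illustrative; error handling is elided).  Note that the
 * kernel opens but does not create the trace file, so it must already
 * exist -- the ktrace(1) utility creates it before calling ktrace(2):
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		ktrace("ktrace.out", KTROP_SET, KTRFAC_SYSCALL |
 *		    KTRFAC_SYSRET | KTRFAC_NAMEI | KTRFAC_USER, getpid());
 *		utrace("mark", 4);	// emits a KTR_USER record
 *		ktrace(NULL, KTROP_CLEAR, KTRFAC_SYSCALL | KTRFAC_SYSRET |
 *		    KTRFAC_NAMEI | KTRFAC_USER, getpid());
 *		return (0);
 *	}
 *
 * The resulting trace is decoded with kdump(1).
 */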

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct	ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

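/*
 * Fixed-length payload (in bytes) for each record type; indexed by the
 * KTR_* record type constants from sys/ktrace.h, so the order here must
 * match.
 */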
static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0					/* KTR_USER */
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
struct mtx ktrace_mtx;
static struct cv ktrace_cv;
static struct sx ktrace_sx;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *,struct proc *);
static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	cv_init(&ktrace_cv, "ktrace");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");
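
/*
 * Both knobs above may be set as loader tunables or adjusted at runtime,
 * e.g. (illustrative values):
 *
 *	sysctl kern.ktrace.request_pool=200
 *	sysctl kern.ktrace.genio_size=32768
 *
 * The handler above reports the old pool size and returns ENOSPC if the
 * pool could not be grown all the way to the requested size.
 */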

static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	ktrace_enter(td);	/* XXX: In caller instead? */
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		ktrace_exit(td);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(p->p_comm, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		ktrace_exit(td);
	}
	return (req);
}

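/*
 * A NULL return from ktr_getrequest() means the record was dropped.
 * KTRFAC_DROP stays latched on the process, so the next record that is
 * successfully allocated has KTR_DROP set in its type field, letting
 * consumers of the trace file detect the gap.
 */
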
/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under
 * these circumstances, queue the request on the process for later,
 * asynchronous delivery by ktr_drain().
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
}

/*
 * Drain any pending ktrace records from the per-process queue to disk.
 * This is used both internally before committing other records and on
 * system call return.  We drain all the ones we can find at the time the
 * drain is requested, but don't keep draining after that, as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}
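
/*
 * Note that ktr_drain() splices the queue onto a local list while
 * ktrace_mtx is held, so the vnode writes (which may sleep) happen
 * without the mutex; the ktrace_sx lock, held exclusively by its
 * callers, serializes writers so this process's records are not
 * reordered.
 */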

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);

	ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

/*
 * MPSAFE
 */
void
ktrsyscall(int code, int narg, register_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

/*
 * MPSAFE
 */
void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;		/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a process exits, drain per-process asynchronous trace records.
 */
void
ktrprocexit(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

void
ktrnamei(char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

/*
 * Since the uio may not stay valid, we cannot hand this request off for
 * asynchronous delivery and must process it synchronously: the payload is
 * copied out of the uio immediately below, and ktr_submitrequest() drains
 * any queued records before writing this one, so the relative order of
 * records in the trace file is preserved.
 */
void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct ktr_request *req;
	struct ktr_psig	*kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(curthread, req);
}

void
ktrcsw(int out, int user)
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(curthread, req);
}
#endif /* KTRACE */

/* Interface and common routines */

/*
 * ktrace system call
 *
 * MPSAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
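
/*
 * Note that uap->ops packs one KTROP_* operation together with the
 * optional KTRFLAG_DESCEND flag; KTROP() below extracts the operation.
 */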
/* ARGSUSED */
int
ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		mtx_lock(&Giant);
		error = vn_open(&nd, &flags, 0, -1);
		if (error) {
			mtx_unlock(&Giant);
			ktrace_exit(td);
			return (error);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, td);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			mtx_unlock(&Giant);
			ktrace_exit(td);
			return (EACCES);
		}
		mtx_unlock(&Giant);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					cred = p->p_tracecred;
					p->p_tracecred = NULL;
					p->p_tracevp = NULL;
					p->p_traceflag = 0;
					mtx_unlock(&ktrace_mtx);
					PROC_UNLOCK(p);
					mtx_lock(&Giant);
					(void) vn_close(vp, FREAD|FWRITE,
						cred, td);
					mtx_unlock(&Giant);
					crfree(cred);
				} else {
					PROC_UNLOCK(p);
					error = EPERM;
				}
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			PROC_UNLOCK(p);
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		error = p_cansee(td, p);
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (error) {
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		mtx_lock(&Giant);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		mtx_unlock(&Giant);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/*
 * utrace system call
 *
 * MPSAFE
 */
/* ARGSUSED */
int
utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs, struct vnode *vp)
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (td->td_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct vnode *vp)
{
	struct proc *p;
	int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	if (vp != NULL)
		VREF(vp);
	cred = td->td_proc->p_tracecred;
	if (cred != NULL)
		crhold(cred);
	mtx_unlock(&ktrace_mtx);

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		return;
	}
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));

	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	mtx_lock(&Giant);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_check_vnode_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	vrele(vp);
	mtx_unlock(&Giant);
	if (!error)
		return;
	/*
	 * If an error is encountered, give up tracing on this vnode.  We
	 * defer all the vrele()'s on the vnode until after we are finished
	 * walking the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 0;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?  Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	/*
	 * We can't clear any pending requests in threads that have cached
	 * them but not yet committed them, as those are per-thread.  The
	 * thread will have to clear it itself on system call return.
	 */
	mtx_lock(&Giant);
	while (vrele_count-- > 0)
		vrele(vp);
	mtx_unlock(&Giant);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    suser_cred(td->td_ucred, SUSER_ALLOWJAIL))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */