xref: /freebsd/sys/kern/kern_ktrace.c (revision 60e15db9920e1f90f14d143a96f1b230f1d0da59)
19454b2d8SWarner Losh /*-
2df8bae1dSRodney W. Grimes  * Copyright (c) 1989, 1993
32c255e9dSRobert Watson  *	The Regents of the University of California.
42c255e9dSRobert Watson  * Copyright (c) 2005 Robert N. M. Watson
52c255e9dSRobert Watson  * All rights reserved.
6df8bae1dSRodney W. Grimes  *
7df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
8df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
9df8bae1dSRodney W. Grimes  * are met:
10df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
11df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
12df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
13df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
14df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
15df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
16df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
17df8bae1dSRodney W. Grimes  *    without specific prior written permission.
18df8bae1dSRodney W. Grimes  *
19df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
30df8bae1dSRodney W. Grimes  *
31df8bae1dSRodney W. Grimes  *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
32df8bae1dSRodney W. Grimes  */
33df8bae1dSRodney W. Grimes 
34677b542eSDavid E. O'Brien #include <sys/cdefs.h>
35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
36677b542eSDavid E. O'Brien 
37db6a20e2SGarrett Wollman #include "opt_ktrace.h"
38467a273cSRobert Watson #include "opt_mac.h"
39df8bae1dSRodney W. Grimes 
40df8bae1dSRodney W. Grimes #include <sys/param.h>
41f23b4c91SGarrett Wollman #include <sys/systm.h>
42ea3fc8e4SJohn Baldwin #include <sys/fcntl.h>
43ea3fc8e4SJohn Baldwin #include <sys/kernel.h>
44ea3fc8e4SJohn Baldwin #include <sys/kthread.h>
45fb919e4dSMark Murray #include <sys/lock.h>
46fb919e4dSMark Murray #include <sys/mutex.h>
47ea3fc8e4SJohn Baldwin #include <sys/malloc.h>
48033eb86eSJeff Roberson #include <sys/mount.h>
49df8bae1dSRodney W. Grimes #include <sys/namei.h>
50acd3428bSRobert Watson #include <sys/priv.h>
51ea3fc8e4SJohn Baldwin #include <sys/proc.h>
52ea3fc8e4SJohn Baldwin #include <sys/unistd.h>
53df8bae1dSRodney W. Grimes #include <sys/vnode.h>
5460e15db9SDag-Erling Smørgrav #include <sys/socket.h>
5560e15db9SDag-Erling Smørgrav #include <sys/stat.h>
56df8bae1dSRodney W. Grimes #include <sys/ktrace.h>
571005a129SJohn Baldwin #include <sys/sx.h>
58ea3fc8e4SJohn Baldwin #include <sys/sysctl.h>
59df8bae1dSRodney W. Grimes #include <sys/syslog.h>
60ea3fc8e4SJohn Baldwin #include <sys/sysproto.h>
61df8bae1dSRodney W. Grimes 
62aed55708SRobert Watson #include <security/mac/mac_framework.h>
63aed55708SRobert Watson 
642c255e9dSRobert Watson /*
652c255e9dSRobert Watson  * The ktrace facility allows the tracing of certain key events in user space
662c255e9dSRobert Watson  * processes, such as system calls, signal delivery, context switches, and
672c255e9dSRobert Watson  * user generated events using utrace(2).  It works by streaming event
682c255e9dSRobert Watson  * records and data to a vnode associated with the process using the
692c255e9dSRobert Watson  * ktrace(2) system call.  In general, records can be written directly from
702c255e9dSRobert Watson  * the context that generates the event.  One important exception to this is
712c255e9dSRobert Watson  * during a context switch, where sleeping is not permitted.  To handle this
722c255e9dSRobert Watson  * case, trace events are generated using in-kernel ktr_request records, and
732c255e9dSRobert Watson  * then delivered to disk at a convenient moment -- either immediately, the
742c255e9dSRobert Watson  * at the next traceable event, at system call return, or at process exit.
752c255e9dSRobert Watson  *
762c255e9dSRobert Watson  * When dealing with multiple threads or processes writing to the same event
772c255e9dSRobert Watson  * log, ordering guarantees are weak: specifically, if an event has multiple
782c255e9dSRobert Watson  * records (i.e., system call enter and return), they may be interlaced with
792c255e9dSRobert Watson  * records from another event.  Process and thread ID information is provided
802c255e9dSRobert Watson  * in the record, and user applications can de-interlace events if required.
812c255e9dSRobert Watson  */
822c255e9dSRobert Watson 
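/*
 * Illustrative sketch (assumes the standard userland ktrace(2)/utrace(2)
 * interface; the file name, mode, and trace points are arbitrary example
 * choices): a minimal program that creates a trace file, attaches itself to
 * it for syscall/namei tracing, appends one KTR_USER record via utrace(2),
 * and detaches again.  The resulting ktrace.out would be decoded with
 * kdump(1).
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		const char msg[] = "hello from utrace";
 *		int fd, points;
 *
 *		fd = open("ktrace.out", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *		if (fd == -1)
 *			return (1);
 *		(void)close(fd);
 *		points = KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI;
 *		if (ktrace("ktrace.out", KTROP_SET, points, getpid()) == -1)
 *			return (1);
 *		(void)utrace(msg, sizeof(msg));
 *		(void)ktrace(NULL, KTROP_CLEAR, points, getpid());
 *		return (0);
 *	}
 */
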
83a1c995b6SPoul-Henning Kamp static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");
8455166637SPoul-Henning Kamp 
85db6a20e2SGarrett Wollman #ifdef KTRACE
86ea3fc8e4SJohn Baldwin 
87ea3fc8e4SJohn Baldwin #ifndef KTRACE_REQUEST_POOL
88ea3fc8e4SJohn Baldwin #define	KTRACE_REQUEST_POOL	100
89ea3fc8e4SJohn Baldwin #endif
90ea3fc8e4SJohn Baldwin 
91ea3fc8e4SJohn Baldwin struct ktr_request {
92ea3fc8e4SJohn Baldwin 	struct	ktr_header ktr_header;
93d977a583SRobert Watson 	void	*ktr_buffer;
94ea3fc8e4SJohn Baldwin 	union {
95ea3fc8e4SJohn Baldwin 		struct	ktr_syscall ktr_syscall;
96ea3fc8e4SJohn Baldwin 		struct	ktr_sysret ktr_sysret;
97ea3fc8e4SJohn Baldwin 		struct	ktr_genio ktr_genio;
98ea3fc8e4SJohn Baldwin 		struct	ktr_psig ktr_psig;
99ea3fc8e4SJohn Baldwin 		struct	ktr_csw ktr_csw;
100ea3fc8e4SJohn Baldwin 	} ktr_data;
101ea3fc8e4SJohn Baldwin 	STAILQ_ENTRY(ktr_request) ktr_list;
102ea3fc8e4SJohn Baldwin };
103ea3fc8e4SJohn Baldwin 
104ea3fc8e4SJohn Baldwin static int data_lengths[] = {
105ea3fc8e4SJohn Baldwin 	0,					/* none */
106ea3fc8e4SJohn Baldwin 	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
107ea3fc8e4SJohn Baldwin 	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
108ea3fc8e4SJohn Baldwin 	0,					/* KTR_NAMEI */
109ea3fc8e4SJohn Baldwin 	sizeof(struct ktr_genio),		/* KTR_GENIO */
110ea3fc8e4SJohn Baldwin 	sizeof(struct ktr_psig),		/* KTR_PSIG */
111ea3fc8e4SJohn Baldwin 	sizeof(struct ktr_csw),			/* KTR_CSW */
11260e15db9SDag-Erling Smørgrav 	0,					/* KTR_USER */
11360e15db9SDag-Erling Smørgrav 	0,					/* KTR_STRUCT */
114ea3fc8e4SJohn Baldwin };
115ea3fc8e4SJohn Baldwin 
116ea3fc8e4SJohn Baldwin static STAILQ_HEAD(, ktr_request) ktr_free;
117ea3fc8e4SJohn Baldwin 
1185ece08f5SPoul-Henning Kamp static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");
11912301fc3SJohn Baldwin 
1208b149b51SJohn Baldwin static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
12112301fc3SJohn Baldwin TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);
12212301fc3SJohn Baldwin 
1238b149b51SJohn Baldwin static u_int ktr_geniosize = PAGE_SIZE;
12412301fc3SJohn Baldwin TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
12512301fc3SJohn Baldwin SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
12612301fc3SJohn Baldwin     0, "Maximum size of genio event payload");
127ea3fc8e4SJohn Baldwin 
128ea3fc8e4SJohn Baldwin static int print_message = 1;
129ea3fc8e4SJohn Baldwin struct mtx ktrace_mtx;
1302c255e9dSRobert Watson static struct sx ktrace_sx;
131ea3fc8e4SJohn Baldwin 
132ea3fc8e4SJohn Baldwin static void ktrace_init(void *dummy);
133ea3fc8e4SJohn Baldwin static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
1348b149b51SJohn Baldwin static u_int ktrace_resize_pool(u_int newsize);
135ea3fc8e4SJohn Baldwin static struct ktr_request *ktr_getrequest(int type);
1362c255e9dSRobert Watson static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
137ea3fc8e4SJohn Baldwin static void ktr_freerequest(struct ktr_request *req);
1382c255e9dSRobert Watson static void ktr_writerequest(struct thread *td, struct ktr_request *req);
139a7ff7443SJohn Baldwin static int ktrcanset(struct thread *,struct proc *);
140a7ff7443SJohn Baldwin static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
141a7ff7443SJohn Baldwin static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);
14298d93822SBruce Evans 
1432c255e9dSRobert Watson /*
1442c255e9dSRobert Watson  * ktrace itself generates events, such as context switches, which we do not
1452c255e9dSRobert Watson  * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
1462c255e9dSRobert Watson  * whether or not it is in a region where tracing of events should be
1472c255e9dSRobert Watson  * suppressed.
1482c255e9dSRobert Watson  */
1492c255e9dSRobert Watson static void
1502c255e9dSRobert Watson ktrace_enter(struct thread *td)
1512c255e9dSRobert Watson {
1522c255e9dSRobert Watson 
1532c255e9dSRobert Watson 	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
1542c255e9dSRobert Watson 	td->td_pflags |= TDP_INKTRACE;
1552c255e9dSRobert Watson }
1562c255e9dSRobert Watson 
1572c255e9dSRobert Watson static void
1582c255e9dSRobert Watson ktrace_exit(struct thread *td)
1592c255e9dSRobert Watson {
1602c255e9dSRobert Watson 
1612c255e9dSRobert Watson 	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
1622c255e9dSRobert Watson 	td->td_pflags &= ~TDP_INKTRACE;
1632c255e9dSRobert Watson }
1642c255e9dSRobert Watson 
1652c255e9dSRobert Watson static void
1662c255e9dSRobert Watson ktrace_assert(struct thread *td)
1672c255e9dSRobert Watson {
1682c255e9dSRobert Watson 
1692c255e9dSRobert Watson 	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
1702c255e9dSRobert Watson }
1712c255e9dSRobert Watson 
172ea3fc8e4SJohn Baldwin static void
173ea3fc8e4SJohn Baldwin ktrace_init(void *dummy)
174df8bae1dSRodney W. Grimes {
175ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
176ea3fc8e4SJohn Baldwin 	int i;
177df8bae1dSRodney W. Grimes 
178ea3fc8e4SJohn Baldwin 	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
1792c255e9dSRobert Watson 	sx_init(&ktrace_sx, "ktrace_sx");
180ea3fc8e4SJohn Baldwin 	STAILQ_INIT(&ktr_free);
181ea3fc8e4SJohn Baldwin 	for (i = 0; i < ktr_requestpool; i++) {
182a163d034SWarner Losh 		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
183ea3fc8e4SJohn Baldwin 		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
184ea3fc8e4SJohn Baldwin 	}
185ea3fc8e4SJohn Baldwin }
186ea3fc8e4SJohn Baldwin SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);
187ea3fc8e4SJohn Baldwin 
188ea3fc8e4SJohn Baldwin static int
189ea3fc8e4SJohn Baldwin sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
190ea3fc8e4SJohn Baldwin {
191ea3fc8e4SJohn Baldwin 	struct thread *td;
1928b149b51SJohn Baldwin 	u_int newsize, oldsize, wantsize;
193ea3fc8e4SJohn Baldwin 	int error;
194ea3fc8e4SJohn Baldwin 
195ea3fc8e4SJohn Baldwin 	/* Handle easy read-only case first to avoid warnings from GCC. */
196ea3fc8e4SJohn Baldwin 	if (!req->newptr) {
197ea3fc8e4SJohn Baldwin 		mtx_lock(&ktrace_mtx);
198ea3fc8e4SJohn Baldwin 		oldsize = ktr_requestpool;
199ea3fc8e4SJohn Baldwin 		mtx_unlock(&ktrace_mtx);
2008b149b51SJohn Baldwin 		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
201ea3fc8e4SJohn Baldwin 	}
202ea3fc8e4SJohn Baldwin 
2038b149b51SJohn Baldwin 	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
204ea3fc8e4SJohn Baldwin 	if (error)
205ea3fc8e4SJohn Baldwin 		return (error);
206ea3fc8e4SJohn Baldwin 	td = curthread;
2072c255e9dSRobert Watson 	ktrace_enter(td);
208ea3fc8e4SJohn Baldwin 	mtx_lock(&ktrace_mtx);
209ea3fc8e4SJohn Baldwin 	oldsize = ktr_requestpool;
210ea3fc8e4SJohn Baldwin 	newsize = ktrace_resize_pool(wantsize);
211ea3fc8e4SJohn Baldwin 	mtx_unlock(&ktrace_mtx);
2122c255e9dSRobert Watson 	ktrace_exit(td);
2138b149b51SJohn Baldwin 	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
214ea3fc8e4SJohn Baldwin 	if (error)
215ea3fc8e4SJohn Baldwin 		return (error);
216a5896914SJoseph Koshy 	if (wantsize > oldsize && newsize < wantsize)
217ea3fc8e4SJohn Baldwin 		return (ENOSPC);
218ea3fc8e4SJohn Baldwin 	return (0);
219ea3fc8e4SJohn Baldwin }
22012301fc3SJohn Baldwin SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
221ea3fc8e4SJohn Baldwin     &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");
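/*
 * Illustrative tuning sketch (the values shown are arbitrary): the request
 * pool can be seeded at boot through the loader tunable declared above and
 * resized at runtime through this read/write sysctl; the genio payload limit
 * is adjusted the same way.  A runtime resize that cannot be fully satisfied
 * returns ENOSPC from the handler above.
 *
 *	kern.ktrace.request_pool="200"		(in /boot/loader.conf)
 *	# sysctl kern.ktrace.request_pool=200
 *	# sysctl kern.ktrace.genio_size=65536
 */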
222ea3fc8e4SJohn Baldwin 
2238b149b51SJohn Baldwin static u_int
2248b149b51SJohn Baldwin ktrace_resize_pool(u_int newsize)
225ea3fc8e4SJohn Baldwin {
226ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
227a5896914SJoseph Koshy 	int bound;
228ea3fc8e4SJohn Baldwin 
229ea3fc8e4SJohn Baldwin 	mtx_assert(&ktrace_mtx, MA_OWNED);
230ea3fc8e4SJohn Baldwin 	print_message = 1;
231a5896914SJoseph Koshy 	bound = newsize - ktr_requestpool;
232a5896914SJoseph Koshy 	if (bound == 0)
233a5896914SJoseph Koshy 		return (ktr_requestpool);
234a5896914SJoseph Koshy 	if (bound < 0)
235ea3fc8e4SJohn Baldwin 		/* Shrink pool down to newsize if possible. */
236a5896914SJoseph Koshy 		while (bound++ < 0) {
237ea3fc8e4SJohn Baldwin 			req = STAILQ_FIRST(&ktr_free);
238ea3fc8e4SJohn Baldwin 			if (req == NULL)
239ea3fc8e4SJohn Baldwin 				return (ktr_requestpool);
240ea3fc8e4SJohn Baldwin 			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
241ea3fc8e4SJohn Baldwin 			ktr_requestpool--;
242ea3fc8e4SJohn Baldwin 			mtx_unlock(&ktrace_mtx);
243ea3fc8e4SJohn Baldwin 			free(req, M_KTRACE);
244ea3fc8e4SJohn Baldwin 			mtx_lock(&ktrace_mtx);
245ea3fc8e4SJohn Baldwin 		}
246ea3fc8e4SJohn Baldwin 	else
247ea3fc8e4SJohn Baldwin 		/* Grow pool up to newsize. */
248a5896914SJoseph Koshy 		while (bound-- > 0) {
249ea3fc8e4SJohn Baldwin 			mtx_unlock(&ktrace_mtx);
250ea3fc8e4SJohn Baldwin 			req = malloc(sizeof(struct ktr_request), M_KTRACE,
251a163d034SWarner Losh 			    M_WAITOK);
252ea3fc8e4SJohn Baldwin 			mtx_lock(&ktrace_mtx);
253ea3fc8e4SJohn Baldwin 			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
254ea3fc8e4SJohn Baldwin 			ktr_requestpool++;
255ea3fc8e4SJohn Baldwin 		}
256ea3fc8e4SJohn Baldwin 	return (ktr_requestpool);
257ea3fc8e4SJohn Baldwin }
258ea3fc8e4SJohn Baldwin 
259ea3fc8e4SJohn Baldwin static struct ktr_request *
260ea3fc8e4SJohn Baldwin ktr_getrequest(int type)
261ea3fc8e4SJohn Baldwin {
262ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
263ea3fc8e4SJohn Baldwin 	struct thread *td = curthread;
264ea3fc8e4SJohn Baldwin 	struct proc *p = td->td_proc;
265ea3fc8e4SJohn Baldwin 	int pm;
266ea3fc8e4SJohn Baldwin 
2672c255e9dSRobert Watson 	ktrace_enter(td);	/* XXX: In caller instead? */
268c5c9bd5bSRobert Watson 	mtx_lock(&ktrace_mtx);
269ea3fc8e4SJohn Baldwin 	if (!KTRCHECK(td, type)) {
270c5c9bd5bSRobert Watson 		mtx_unlock(&ktrace_mtx);
2712c255e9dSRobert Watson 		ktrace_exit(td);
272ea3fc8e4SJohn Baldwin 		return (NULL);
273ea3fc8e4SJohn Baldwin 	}
274ea3fc8e4SJohn Baldwin 	req = STAILQ_FIRST(&ktr_free);
275ea3fc8e4SJohn Baldwin 	if (req != NULL) {
276ea3fc8e4SJohn Baldwin 		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
277ea3fc8e4SJohn Baldwin 		req->ktr_header.ktr_type = type;
27875768576SJohn Baldwin 		if (p->p_traceflag & KTRFAC_DROP) {
27975768576SJohn Baldwin 			req->ktr_header.ktr_type |= KTR_DROP;
28075768576SJohn Baldwin 			p->p_traceflag &= ~KTRFAC_DROP;
28175768576SJohn Baldwin 		}
282c5c9bd5bSRobert Watson 		mtx_unlock(&ktrace_mtx);
283ea3fc8e4SJohn Baldwin 		microtime(&req->ktr_header.ktr_time);
284ea3fc8e4SJohn Baldwin 		req->ktr_header.ktr_pid = p->p_pid;
2852bdeb3f9SRobert Watson 		req->ktr_header.ktr_tid = td->td_tid;
286e01eafefSJulian Elischer 		bcopy(td->td_name, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
287d977a583SRobert Watson 		req->ktr_buffer = NULL;
288ea3fc8e4SJohn Baldwin 		req->ktr_header.ktr_len = 0;
289ea3fc8e4SJohn Baldwin 	} else {
29075768576SJohn Baldwin 		p->p_traceflag |= KTRFAC_DROP;
291ea3fc8e4SJohn Baldwin 		pm = print_message;
292ea3fc8e4SJohn Baldwin 		print_message = 0;
293ea3fc8e4SJohn Baldwin 		mtx_unlock(&ktrace_mtx);
294ea3fc8e4SJohn Baldwin 		if (pm)
295ea3fc8e4SJohn Baldwin 			printf("Out of ktrace request objects.\n");
2962c255e9dSRobert Watson 		ktrace_exit(td);
297ea3fc8e4SJohn Baldwin 	}
298ea3fc8e4SJohn Baldwin 	return (req);
299ea3fc8e4SJohn Baldwin }
300ea3fc8e4SJohn Baldwin 
3012c255e9dSRobert Watson /*
3022c255e9dSRobert Watson  * Some trace generation environments don't permit direct access to VFS,
3032c255e9dSRobert Watson  * such as during a context switch where sleeping is not allowed.  Under these
3042c255e9dSRobert Watson  * circumstances, queue a request to the thread to be written asynchronously
3052c255e9dSRobert Watson  * later.
3062c255e9dSRobert Watson  */
307ea3fc8e4SJohn Baldwin static void
3082c255e9dSRobert Watson ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
309ea3fc8e4SJohn Baldwin {
310ea3fc8e4SJohn Baldwin 
311ea3fc8e4SJohn Baldwin 	mtx_lock(&ktrace_mtx);
3122c255e9dSRobert Watson 	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
313ea3fc8e4SJohn Baldwin 	mtx_unlock(&ktrace_mtx);
3142c255e9dSRobert Watson 	ktrace_exit(td);
3152c255e9dSRobert Watson }
3162c255e9dSRobert Watson 
3172c255e9dSRobert Watson /*
3182c255e9dSRobert Watson  * Drain any pending ktrace records from the per-thread queue to disk.  This
3192c255e9dSRobert Watson  * is used both internally before committing other records, and also on
3202c255e9dSRobert Watson  * system call return.  We drain all the ones we can find at the time when
3212c255e9dSRobert Watson  * drain is requested, but don't keep draining after that as those events
3222c255e9dSRobert Watson  * may me approximately "after" the current event.
3232c255e9dSRobert Watson  */
3242c255e9dSRobert Watson static void
3252c255e9dSRobert Watson ktr_drain(struct thread *td)
3262c255e9dSRobert Watson {
3272c255e9dSRobert Watson 	struct ktr_request *queued_req;
3282c255e9dSRobert Watson 	STAILQ_HEAD(, ktr_request) local_queue;
3292c255e9dSRobert Watson 
3302c255e9dSRobert Watson 	ktrace_assert(td);
3312c255e9dSRobert Watson 	sx_assert(&ktrace_sx, SX_XLOCKED);
3322c255e9dSRobert Watson 
3332c255e9dSRobert Watson 	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */
3342c255e9dSRobert Watson 
3352c255e9dSRobert Watson 	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
3362c255e9dSRobert Watson 		mtx_lock(&ktrace_mtx);
3372c255e9dSRobert Watson 		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
3382c255e9dSRobert Watson 		mtx_unlock(&ktrace_mtx);
3392c255e9dSRobert Watson 
3402c255e9dSRobert Watson 		while ((queued_req = STAILQ_FIRST(&local_queue))) {
3412c255e9dSRobert Watson 			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
3422c255e9dSRobert Watson 			ktr_writerequest(td, queued_req);
3432c255e9dSRobert Watson 			ktr_freerequest(queued_req);
3442c255e9dSRobert Watson 		}
3452c255e9dSRobert Watson 	}
3462c255e9dSRobert Watson }
3472c255e9dSRobert Watson 
3482c255e9dSRobert Watson /*
3492c255e9dSRobert Watson  * Submit a trace record for immediate commit to disk -- to be used only
3502c255e9dSRobert Watson  * where entering VFS is OK.  First drain any pending records that may have
3512c255e9dSRobert Watson  * been cached in the thread.
3522c255e9dSRobert Watson  */
3532c255e9dSRobert Watson static void
3542c255e9dSRobert Watson ktr_submitrequest(struct thread *td, struct ktr_request *req)
3552c255e9dSRobert Watson {
3562c255e9dSRobert Watson 
3572c255e9dSRobert Watson 	ktrace_assert(td);
3582c255e9dSRobert Watson 
3592c255e9dSRobert Watson 	sx_xlock(&ktrace_sx);
3602c255e9dSRobert Watson 	ktr_drain(td);
3612c255e9dSRobert Watson 	ktr_writerequest(td, req);
3622c255e9dSRobert Watson 	ktr_freerequest(req);
3632c255e9dSRobert Watson 	sx_xunlock(&ktrace_sx);
3642c255e9dSRobert Watson 
3652c255e9dSRobert Watson 	ktrace_exit(td);
366ea3fc8e4SJohn Baldwin }
367ea3fc8e4SJohn Baldwin 
368ea3fc8e4SJohn Baldwin static void
369ea3fc8e4SJohn Baldwin ktr_freerequest(struct ktr_request *req)
370ea3fc8e4SJohn Baldwin {
371ea3fc8e4SJohn Baldwin 
372d977a583SRobert Watson 	if (req->ktr_buffer != NULL)
373d977a583SRobert Watson 		free(req->ktr_buffer, M_KTRACE);
374ea3fc8e4SJohn Baldwin 	mtx_lock(&ktrace_mtx);
375ea3fc8e4SJohn Baldwin 	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
376ea3fc8e4SJohn Baldwin 	mtx_unlock(&ktrace_mtx);
377ea3fc8e4SJohn Baldwin }
378ea3fc8e4SJohn Baldwin 
37926f9a767SRodney W. Grimes void
380ea3fc8e4SJohn Baldwin ktrsyscall(code, narg, args)
38171ddfdbbSDmitrij Tejblum 	int code, narg;
38271ddfdbbSDmitrij Tejblum 	register_t args[];
383df8bae1dSRodney W. Grimes {
384ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
385df8bae1dSRodney W. Grimes 	struct ktr_syscall *ktp;
386ea3fc8e4SJohn Baldwin 	size_t buflen;
3874b3aac3dSJohn Baldwin 	char *buf = NULL;
388df8bae1dSRodney W. Grimes 
3894b3aac3dSJohn Baldwin 	buflen = sizeof(register_t) * narg;
3904b3aac3dSJohn Baldwin 	if (buflen > 0) {
391a163d034SWarner Losh 		buf = malloc(buflen, M_KTRACE, M_WAITOK);
3924b3aac3dSJohn Baldwin 		bcopy(args, buf, buflen);
3934b3aac3dSJohn Baldwin 	}
394ea3fc8e4SJohn Baldwin 	req = ktr_getrequest(KTR_SYSCALL);
39550c22331SPoul-Henning Kamp 	if (req == NULL) {
39650c22331SPoul-Henning Kamp 		if (buf != NULL)
39750c22331SPoul-Henning Kamp 			free(buf, M_KTRACE);
398ea3fc8e4SJohn Baldwin 		return;
39950c22331SPoul-Henning Kamp 	}
400ea3fc8e4SJohn Baldwin 	ktp = &req->ktr_data.ktr_syscall;
401df8bae1dSRodney W. Grimes 	ktp->ktr_code = code;
402df8bae1dSRodney W. Grimes 	ktp->ktr_narg = narg;
403ea3fc8e4SJohn Baldwin 	if (buflen > 0) {
404ea3fc8e4SJohn Baldwin 		req->ktr_header.ktr_len = buflen;
405d977a583SRobert Watson 		req->ktr_buffer = buf;
406ea3fc8e4SJohn Baldwin 	}
4072c255e9dSRobert Watson 	ktr_submitrequest(curthread, req);
408df8bae1dSRodney W. Grimes }
409df8bae1dSRodney W. Grimes 
41026f9a767SRodney W. Grimes void
411ea3fc8e4SJohn Baldwin ktrsysret(code, error, retval)
41271ddfdbbSDmitrij Tejblum 	int code, error;
41371ddfdbbSDmitrij Tejblum 	register_t retval;
414df8bae1dSRodney W. Grimes {
415ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
416ea3fc8e4SJohn Baldwin 	struct ktr_sysret *ktp;
417df8bae1dSRodney W. Grimes 
418ea3fc8e4SJohn Baldwin 	req = ktr_getrequest(KTR_SYSRET);
419ea3fc8e4SJohn Baldwin 	if (req == NULL)
420ea3fc8e4SJohn Baldwin 		return;
421ea3fc8e4SJohn Baldwin 	ktp = &req->ktr_data.ktr_sysret;
422ea3fc8e4SJohn Baldwin 	ktp->ktr_code = code;
423ea3fc8e4SJohn Baldwin 	ktp->ktr_error = error;
424ea3fc8e4SJohn Baldwin 	ktp->ktr_retval = retval;		/* what about val2 ? */
4252c255e9dSRobert Watson 	ktr_submitrequest(curthread, req);
4262c255e9dSRobert Watson }
4272c255e9dSRobert Watson 
4282c255e9dSRobert Watson /*
4292c255e9dSRobert Watson  * When a process exits, drain per-process asynchronous trace records.
4302c255e9dSRobert Watson  */
4312c255e9dSRobert Watson void
4322c255e9dSRobert Watson ktrprocexit(struct thread *td)
4332c255e9dSRobert Watson {
4342c255e9dSRobert Watson 
4352c255e9dSRobert Watson 	ktrace_enter(td);
4362c255e9dSRobert Watson 	sx_xlock(&ktrace_sx);
4372c255e9dSRobert Watson 	ktr_drain(td);
4382c255e9dSRobert Watson 	sx_xunlock(&ktrace_sx);
4392c255e9dSRobert Watson 	ktrace_exit(td);
4402c255e9dSRobert Watson }
4412c255e9dSRobert Watson 
4422c255e9dSRobert Watson /*
4432c255e9dSRobert Watson  * When a thread returns, drain any asynchronous records generated by the
4442c255e9dSRobert Watson  * system call.
4452c255e9dSRobert Watson  */
4462c255e9dSRobert Watson void
4472c255e9dSRobert Watson ktruserret(struct thread *td)
4482c255e9dSRobert Watson {
4492c255e9dSRobert Watson 
4502c255e9dSRobert Watson 	ktrace_enter(td);
4512c255e9dSRobert Watson 	sx_xlock(&ktrace_sx);
4522c255e9dSRobert Watson 	ktr_drain(td);
4532c255e9dSRobert Watson 	sx_xunlock(&ktrace_sx);
4542c255e9dSRobert Watson 	ktrace_exit(td);
455df8bae1dSRodney W. Grimes }
456df8bae1dSRodney W. Grimes 
45726f9a767SRodney W. Grimes void
458ea3fc8e4SJohn Baldwin ktrnamei(path)
459df8bae1dSRodney W. Grimes 	char *path;
460df8bae1dSRodney W. Grimes {
461ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
462ea3fc8e4SJohn Baldwin 	int namelen;
4634b3aac3dSJohn Baldwin 	char *buf = NULL;
464df8bae1dSRodney W. Grimes 
4654b3aac3dSJohn Baldwin 	namelen = strlen(path);
4664b3aac3dSJohn Baldwin 	if (namelen > 0) {
467a163d034SWarner Losh 		buf = malloc(namelen, M_KTRACE, M_WAITOK);
4684b3aac3dSJohn Baldwin 		bcopy(path, buf, namelen);
4694b3aac3dSJohn Baldwin 	}
470ea3fc8e4SJohn Baldwin 	req = ktr_getrequest(KTR_NAMEI);
47150c22331SPoul-Henning Kamp 	if (req == NULL) {
47250c22331SPoul-Henning Kamp 		if (buf != NULL)
47350c22331SPoul-Henning Kamp 			free(buf, M_KTRACE);
474ea3fc8e4SJohn Baldwin 		return;
47550c22331SPoul-Henning Kamp 	}
476ea3fc8e4SJohn Baldwin 	if (namelen > 0) {
477ea3fc8e4SJohn Baldwin 		req->ktr_header.ktr_len = namelen;
478d977a583SRobert Watson 		req->ktr_buffer = buf;
479ea3fc8e4SJohn Baldwin 	}
4802c255e9dSRobert Watson 	ktr_submitrequest(curthread, req);
481df8bae1dSRodney W. Grimes }
482df8bae1dSRodney W. Grimes 
48326f9a767SRodney W. Grimes void
484ea3fc8e4SJohn Baldwin ktrgenio(fd, rw, uio, error)
485df8bae1dSRodney W. Grimes 	int fd;
486df8bae1dSRodney W. Grimes 	enum uio_rw rw;
48742ebfbf2SBrian Feldman 	struct uio *uio;
48842ebfbf2SBrian Feldman 	int error;
489df8bae1dSRodney W. Grimes {
490ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
491ea3fc8e4SJohn Baldwin 	struct ktr_genio *ktg;
492b92584a6SJohn Baldwin 	int datalen;
493b92584a6SJohn Baldwin 	char *buf;
494df8bae1dSRodney W. Grimes 
495552afd9cSPoul-Henning Kamp 	if (error) {
496552afd9cSPoul-Henning Kamp 		free(uio, M_IOV);
497df8bae1dSRodney W. Grimes 		return;
498552afd9cSPoul-Henning Kamp 	}
499b92584a6SJohn Baldwin 	uio->uio_offset = 0;
500b92584a6SJohn Baldwin 	uio->uio_rw = UIO_WRITE;
501b92584a6SJohn Baldwin 	datalen = imin(uio->uio_resid, ktr_geniosize);
502a163d034SWarner Losh 	buf = malloc(datalen, M_KTRACE, M_WAITOK);
503552afd9cSPoul-Henning Kamp 	error = uiomove(buf, datalen, uio);
504552afd9cSPoul-Henning Kamp 	free(uio, M_IOV);
505552afd9cSPoul-Henning Kamp 	if (error) {
506b92584a6SJohn Baldwin 		free(buf, M_KTRACE);
507ea3fc8e4SJohn Baldwin 		return;
508b92584a6SJohn Baldwin 	}
509b92584a6SJohn Baldwin 	req = ktr_getrequest(KTR_GENIO);
510b92584a6SJohn Baldwin 	if (req == NULL) {
511b92584a6SJohn Baldwin 		free(buf, M_KTRACE);
512b92584a6SJohn Baldwin 		return;
513b92584a6SJohn Baldwin 	}
514ea3fc8e4SJohn Baldwin 	ktg = &req->ktr_data.ktr_genio;
515ea3fc8e4SJohn Baldwin 	ktg->ktr_fd = fd;
516ea3fc8e4SJohn Baldwin 	ktg->ktr_rw = rw;
517b92584a6SJohn Baldwin 	req->ktr_header.ktr_len = datalen;
518d977a583SRobert Watson 	req->ktr_buffer = buf;
5192c255e9dSRobert Watson 	ktr_submitrequest(curthread, req);
520df8bae1dSRodney W. Grimes }
521df8bae1dSRodney W. Grimes 
52226f9a767SRodney W. Grimes void
523ea3fc8e4SJohn Baldwin ktrpsig(sig, action, mask, code)
524a93fdaacSMarcel Moolenaar 	int sig;
525df8bae1dSRodney W. Grimes 	sig_t action;
5262c42a146SMarcel Moolenaar 	sigset_t *mask;
527a93fdaacSMarcel Moolenaar 	int code;
528df8bae1dSRodney W. Grimes {
529ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
530ea3fc8e4SJohn Baldwin 	struct ktr_psig	*kp;
531df8bae1dSRodney W. Grimes 
532ea3fc8e4SJohn Baldwin 	req = ktr_getrequest(KTR_PSIG);
533ea3fc8e4SJohn Baldwin 	if (req == NULL)
534ea3fc8e4SJohn Baldwin 		return;
535ea3fc8e4SJohn Baldwin 	kp = &req->ktr_data.ktr_psig;
536ea3fc8e4SJohn Baldwin 	kp->signo = (char)sig;
537ea3fc8e4SJohn Baldwin 	kp->action = action;
538ea3fc8e4SJohn Baldwin 	kp->mask = *mask;
539ea3fc8e4SJohn Baldwin 	kp->code = code;
5402c255e9dSRobert Watson 	ktr_enqueuerequest(curthread, req);
541df8bae1dSRodney W. Grimes }
542df8bae1dSRodney W. Grimes 
54326f9a767SRodney W. Grimes void
544ea3fc8e4SJohn Baldwin ktrcsw(out, user)
545df8bae1dSRodney W. Grimes 	int out, user;
546df8bae1dSRodney W. Grimes {
547ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
548ea3fc8e4SJohn Baldwin 	struct ktr_csw *kc;
549df8bae1dSRodney W. Grimes 
550ea3fc8e4SJohn Baldwin 	req = ktr_getrequest(KTR_CSW);
551ea3fc8e4SJohn Baldwin 	if (req == NULL)
552ea3fc8e4SJohn Baldwin 		return;
553ea3fc8e4SJohn Baldwin 	kc = &req->ktr_data.ktr_csw;
554ea3fc8e4SJohn Baldwin 	kc->out = out;
555ea3fc8e4SJohn Baldwin 	kc->user = user;
5562c255e9dSRobert Watson 	ktr_enqueuerequest(curthread, req);
557df8bae1dSRodney W. Grimes }
55860e15db9SDag-Erling Smørgrav 
55960e15db9SDag-Erling Smørgrav void
56060e15db9SDag-Erling Smørgrav ktrstruct(name, namelen, data, datalen)
56160e15db9SDag-Erling Smørgrav 	const char *name;
56260e15db9SDag-Erling Smørgrav 	size_t namelen;
56360e15db9SDag-Erling Smørgrav 	void *data;
56460e15db9SDag-Erling Smørgrav 	size_t datalen;
56560e15db9SDag-Erling Smørgrav {
56660e15db9SDag-Erling Smørgrav 	struct ktr_request *req;
56760e15db9SDag-Erling Smørgrav 	char *buf = NULL;
56860e15db9SDag-Erling Smørgrav 	size_t buflen;
56960e15db9SDag-Erling Smørgrav 
57060e15db9SDag-Erling Smørgrav 	if (!data)
57160e15db9SDag-Erling Smørgrav 		datalen = 0;
57260e15db9SDag-Erling Smørgrav 	buflen = namelen + 1 + datalen;
57360e15db9SDag-Erling Smørgrav 	buf = malloc(buflen, M_KTRACE, M_WAITOK);
57460e15db9SDag-Erling Smørgrav 	bcopy(name, buf, namelen);
57560e15db9SDag-Erling Smørgrav 	buf[namelen] = '\0';
57660e15db9SDag-Erling Smørgrav 	bcopy(data, buf + namelen + 1, datalen);
57760e15db9SDag-Erling Smørgrav 	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
57860e15db9SDag-Erling Smørgrav 		free(buf, M_KTRACE);
57960e15db9SDag-Erling Smørgrav 		return;
58060e15db9SDag-Erling Smørgrav 	}
58160e15db9SDag-Erling Smørgrav 	req->ktr_buffer = buf;
58260e15db9SDag-Erling Smørgrav 	req->ktr_header.ktr_len = buflen;
58360e15db9SDag-Erling Smørgrav 	ktr_submitrequest(curthread, req);
58460e15db9SDag-Erling Smørgrav }
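/*
 * Illustrative sketch of how KTR_STRUCT records are meant to be generated
 * (the wrapper names and exact spellings below are assumptions for this
 * example, not necessarily what <sys/ktrace.h> provides): callers hand a
 * short type name plus a copy of the structure to ktrstruct(), typically
 * through thin macros such as
 *
 *	#define	ktrsockaddr(s)						\
 *		ktrstruct("sockaddr", (s), ((struct sockaddr *)(s))->sa_len)
 *	#define	ktrstat(s)						\
 *		ktrstruct("stat", (s), sizeof(struct stat))
 *
 * so that kdump(1) can decode the named payload from the trace file.
 */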
58564cc6a13SJohn Baldwin #endif /* KTRACE */
586df8bae1dSRodney W. Grimes 
587df8bae1dSRodney W. Grimes /* Interface and common routines */
588df8bae1dSRodney W. Grimes 
589d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
590df8bae1dSRodney W. Grimes struct ktrace_args {
591df8bae1dSRodney W. Grimes 	char	*fname;
592df8bae1dSRodney W. Grimes 	int	ops;
593df8bae1dSRodney W. Grimes 	int	facs;
594df8bae1dSRodney W. Grimes 	int	pid;
595df8bae1dSRodney W. Grimes };
596d2d3e875SBruce Evans #endif
597df8bae1dSRodney W. Grimes /* ARGSUSED */
59826f9a767SRodney W. Grimes int
599b40ce416SJulian Elischer ktrace(td, uap)
600b40ce416SJulian Elischer 	struct thread *td;
601df8bae1dSRodney W. Grimes 	register struct ktrace_args *uap;
602df8bae1dSRodney W. Grimes {
603db6a20e2SGarrett Wollman #ifdef KTRACE
604df8bae1dSRodney W. Grimes 	register struct vnode *vp = NULL;
605df8bae1dSRodney W. Grimes 	register struct proc *p;
606df8bae1dSRodney W. Grimes 	struct pgrp *pg;
607df8bae1dSRodney W. Grimes 	int facs = uap->facs & ~KTRFAC_ROOT;
608df8bae1dSRodney W. Grimes 	int ops = KTROP(uap->ops);
609df8bae1dSRodney W. Grimes 	int descend = uap->ops & KTRFLAG_DESCEND;
610400a74bfSPawel Jakub Dawidek 	int nfound, ret = 0;
61133f19beeSJohn Baldwin 	int flags, error = 0, vfslocked;
612df8bae1dSRodney W. Grimes 	struct nameidata nd;
613a5881ea5SJohn Baldwin 	struct ucred *cred;
614df8bae1dSRodney W. Grimes 
61564cc6a13SJohn Baldwin 	/*
61664cc6a13SJohn Baldwin 	 * Need something to (un)trace.
61764cc6a13SJohn Baldwin 	 */
61864cc6a13SJohn Baldwin 	if (ops != KTROP_CLEARFILE && facs == 0)
61964cc6a13SJohn Baldwin 		return (EINVAL);
62064cc6a13SJohn Baldwin 
6212c255e9dSRobert Watson 	ktrace_enter(td);
622df8bae1dSRodney W. Grimes 	if (ops != KTROP_CLEAR) {
623df8bae1dSRodney W. Grimes 		/*
624df8bae1dSRodney W. Grimes 		 * an operation which requires a file argument.
625df8bae1dSRodney W. Grimes 		 */
62633f19beeSJohn Baldwin 		NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE,
62733f19beeSJohn Baldwin 		    uap->fname, td);
628e6796b67SKirk McKusick 		flags = FREAD | FWRITE | O_NOFOLLOW;
6299e223287SKonstantin Belousov 		error = vn_open(&nd, &flags, 0, NULL);
630797f2d22SPoul-Henning Kamp 		if (error) {
6312c255e9dSRobert Watson 			ktrace_exit(td);
632df8bae1dSRodney W. Grimes 			return (error);
633df8bae1dSRodney W. Grimes 		}
63433f19beeSJohn Baldwin 		vfslocked = NDHASGIANT(&nd);
635762e6b85SEivind Eklund 		NDFREE(&nd, NDF_ONLY_PNBUF);
636df8bae1dSRodney W. Grimes 		vp = nd.ni_vp;
63722db15c0SAttilio Rao 		VOP_UNLOCK(vp, 0);
638df8bae1dSRodney W. Grimes 		if (vp->v_type != VREG) {
639a854ed98SJohn Baldwin 			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
64033f19beeSJohn Baldwin 			VFS_UNLOCK_GIANT(vfslocked);
6412c255e9dSRobert Watson 			ktrace_exit(td);
642df8bae1dSRodney W. Grimes 			return (EACCES);
643df8bae1dSRodney W. Grimes 		}
64433f19beeSJohn Baldwin 		VFS_UNLOCK_GIANT(vfslocked);
645df8bae1dSRodney W. Grimes 	}
646df8bae1dSRodney W. Grimes 	/*
64779deba82SMatthew Dillon 	 * Clear all uses of the tracefile.
648df8bae1dSRodney W. Grimes 	 */
649df8bae1dSRodney W. Grimes 	if (ops == KTROP_CLEARFILE) {
65051fd6380SMike Pritchard 		int vrele_count;
65151fd6380SMike Pritchard 
65251fd6380SMike Pritchard 		vrele_count = 0;
6531005a129SJohn Baldwin 		sx_slock(&allproc_lock);
6544f506694SXin LI 		FOREACH_PROC_IN_SYSTEM(p) {
655a7ff7443SJohn Baldwin 			PROC_LOCK(p);
656a5881ea5SJohn Baldwin 			if (p->p_tracevp == vp) {
657ea3fc8e4SJohn Baldwin 				if (ktrcanset(td, p)) {
658ea3fc8e4SJohn Baldwin 					mtx_lock(&ktrace_mtx);
659a5881ea5SJohn Baldwin 					cred = p->p_tracecred;
660a5881ea5SJohn Baldwin 					p->p_tracecred = NULL;
661a5881ea5SJohn Baldwin 					p->p_tracevp = NULL;
662df8bae1dSRodney W. Grimes 					p->p_traceflag = 0;
663ea3fc8e4SJohn Baldwin 					mtx_unlock(&ktrace_mtx);
66451fd6380SMike Pritchard 					vrele_count++;
665a5881ea5SJohn Baldwin 					crfree(cred);
66651fd6380SMike Pritchard 				} else
667df8bae1dSRodney W. Grimes 					error = EPERM;
668df8bae1dSRodney W. Grimes 			}
669a7ff7443SJohn Baldwin 			PROC_UNLOCK(p);
67079deba82SMatthew Dillon 		}
6711005a129SJohn Baldwin 		sx_sunlock(&allproc_lock);
67251fd6380SMike Pritchard 		if (vrele_count > 0) {
67351fd6380SMike Pritchard 			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
67451fd6380SMike Pritchard 			while (vrele_count-- > 0)
67551fd6380SMike Pritchard 				vrele(vp);
67651fd6380SMike Pritchard 			VFS_UNLOCK_GIANT(vfslocked);
67751fd6380SMike Pritchard 		}
678df8bae1dSRodney W. Grimes 		goto done;
679df8bae1dSRodney W. Grimes 	}
680df8bae1dSRodney W. Grimes 	/*
681df8bae1dSRodney W. Grimes 	 * do it
682df8bae1dSRodney W. Grimes 	 */
68364cc6a13SJohn Baldwin 	sx_slock(&proctree_lock);
684df8bae1dSRodney W. Grimes 	if (uap->pid < 0) {
685df8bae1dSRodney W. Grimes 		/*
686df8bae1dSRodney W. Grimes 		 * by process group
687df8bae1dSRodney W. Grimes 		 */
688df8bae1dSRodney W. Grimes 		pg = pgfind(-uap->pid);
689df8bae1dSRodney W. Grimes 		if (pg == NULL) {
690ba626c1dSJohn Baldwin 			sx_sunlock(&proctree_lock);
691df8bae1dSRodney W. Grimes 			error = ESRCH;
692df8bae1dSRodney W. Grimes 			goto done;
693df8bae1dSRodney W. Grimes 		}
694f591779bSSeigo Tanimura 		/*
695f591779bSSeigo Tanimura 		 * ktrops() may call vrele(). Lock pg_members
696ba626c1dSJohn Baldwin 		 * by the proctree_lock rather than pg_mtx.
697f591779bSSeigo Tanimura 		 */
698f591779bSSeigo Tanimura 		PGRP_UNLOCK(pg);
699400a74bfSPawel Jakub Dawidek 		nfound = 0;
700400a74bfSPawel Jakub Dawidek 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
701400a74bfSPawel Jakub Dawidek 			PROC_LOCK(p);
702400a74bfSPawel Jakub Dawidek 			if (p_cansee(td, p) != 0) {
703400a74bfSPawel Jakub Dawidek 				PROC_UNLOCK(p);
704400a74bfSPawel Jakub Dawidek 				continue;
705400a74bfSPawel Jakub Dawidek 			}
706400a74bfSPawel Jakub Dawidek 			PROC_UNLOCK(p);
707400a74bfSPawel Jakub Dawidek 			nfound++;
708df8bae1dSRodney W. Grimes 			if (descend)
709a7ff7443SJohn Baldwin 				ret |= ktrsetchildren(td, p, ops, facs, vp);
710df8bae1dSRodney W. Grimes 			else
711a7ff7443SJohn Baldwin 				ret |= ktrops(td, p, ops, facs, vp);
712400a74bfSPawel Jakub Dawidek 		}
713400a74bfSPawel Jakub Dawidek 		if (nfound == 0) {
714400a74bfSPawel Jakub Dawidek 			sx_sunlock(&proctree_lock);
715400a74bfSPawel Jakub Dawidek 			error = ESRCH;
716400a74bfSPawel Jakub Dawidek 			goto done;
717400a74bfSPawel Jakub Dawidek 		}
718df8bae1dSRodney W. Grimes 	} else {
719df8bae1dSRodney W. Grimes 		/*
720df8bae1dSRodney W. Grimes 		 * by pid
721df8bae1dSRodney W. Grimes 		 */
722df8bae1dSRodney W. Grimes 		p = pfind(uap->pid);
723df8bae1dSRodney W. Grimes 		if (p == NULL) {
72464cc6a13SJohn Baldwin 			sx_sunlock(&proctree_lock);
725df8bae1dSRodney W. Grimes 			error = ESRCH;
726df8bae1dSRodney W. Grimes 			goto done;
727df8bae1dSRodney W. Grimes 		}
7284eb7c9f6SPawel Jakub Dawidek 		error = p_cansee(td, p);
72964cc6a13SJohn Baldwin 		/*
73064cc6a13SJohn Baldwin 		 * The slock of the proctree lock will keep this process
73164cc6a13SJohn Baldwin 		 * from going away, so unlocking the proc here is ok.
73264cc6a13SJohn Baldwin 		 */
73333a9ed9dSJohn Baldwin 		PROC_UNLOCK(p);
734b0d9aeddSPawel Jakub Dawidek 		if (error) {
735b0d9aeddSPawel Jakub Dawidek 			sx_sunlock(&proctree_lock);
7364eb7c9f6SPawel Jakub Dawidek 			goto done;
737b0d9aeddSPawel Jakub Dawidek 		}
738df8bae1dSRodney W. Grimes 		if (descend)
739a7ff7443SJohn Baldwin 			ret |= ktrsetchildren(td, p, ops, facs, vp);
740df8bae1dSRodney W. Grimes 		else
741a7ff7443SJohn Baldwin 			ret |= ktrops(td, p, ops, facs, vp);
742df8bae1dSRodney W. Grimes 	}
74364cc6a13SJohn Baldwin 	sx_sunlock(&proctree_lock);
744df8bae1dSRodney W. Grimes 	if (!ret)
745df8bae1dSRodney W. Grimes 		error = EPERM;
746df8bae1dSRodney W. Grimes done:
74764cc6a13SJohn Baldwin 	if (vp != NULL) {
74833f19beeSJohn Baldwin 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
749a854ed98SJohn Baldwin 		(void) vn_close(vp, FWRITE, td->td_ucred, td);
75033f19beeSJohn Baldwin 		VFS_UNLOCK_GIANT(vfslocked);
75164cc6a13SJohn Baldwin 	}
7522c255e9dSRobert Watson 	ktrace_exit(td);
753df8bae1dSRodney W. Grimes 	return (error);
75464cc6a13SJohn Baldwin #else /* !KTRACE */
75564cc6a13SJohn Baldwin 	return (ENOSYS);
75664cc6a13SJohn Baldwin #endif /* KTRACE */
757df8bae1dSRodney W. Grimes }
758df8bae1dSRodney W. Grimes 
759e6c4b9baSPoul-Henning Kamp /* ARGSUSED */
760e6c4b9baSPoul-Henning Kamp int
761b40ce416SJulian Elischer utrace(td, uap)
762b40ce416SJulian Elischer 	struct thread *td;
763e6c4b9baSPoul-Henning Kamp 	register struct utrace_args *uap;
764e6c4b9baSPoul-Henning Kamp {
765b40ce416SJulian Elischer 
766e6c4b9baSPoul-Henning Kamp #ifdef KTRACE
767ea3fc8e4SJohn Baldwin 	struct ktr_request *req;
7687f05b035SAlfred Perlstein 	void *cp;
769c9e7d28eSJohn Baldwin 	int error;
770e6c4b9baSPoul-Henning Kamp 
771c9e7d28eSJohn Baldwin 	if (!KTRPOINT(td, KTR_USER))
772c9e7d28eSJohn Baldwin 		return (0);
773bdfa4f04SAlfred Perlstein 	if (uap->len > KTR_USER_MAXLEN)
7740bad156aSAlfred Perlstein 		return (EINVAL);
775a163d034SWarner Losh 	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
776c9e7d28eSJohn Baldwin 	error = copyin(uap->addr, cp, uap->len);
77750c22331SPoul-Henning Kamp 	if (error) {
77850c22331SPoul-Henning Kamp 		free(cp, M_KTRACE);
779c9e7d28eSJohn Baldwin 		return (error);
78050c22331SPoul-Henning Kamp 	}
781ea3fc8e4SJohn Baldwin 	req = ktr_getrequest(KTR_USER);
78250c22331SPoul-Henning Kamp 	if (req == NULL) {
78350c22331SPoul-Henning Kamp 		free(cp, M_KTRACE);
784b10221ffSJoseph Koshy 		return (ENOMEM);
78550c22331SPoul-Henning Kamp 	}
786d977a583SRobert Watson 	req->ktr_buffer = cp;
787ea3fc8e4SJohn Baldwin 	req->ktr_header.ktr_len = uap->len;
7882c255e9dSRobert Watson 	ktr_submitrequest(td, req);
789e6c4b9baSPoul-Henning Kamp 	return (0);
79064cc6a13SJohn Baldwin #else /* !KTRACE */
791e6c4b9baSPoul-Henning Kamp 	return (ENOSYS);
79264cc6a13SJohn Baldwin #endif /* KTRACE */
793e6c4b9baSPoul-Henning Kamp }
794e6c4b9baSPoul-Henning Kamp 
795db6a20e2SGarrett Wollman #ifdef KTRACE
79687b6de2bSPoul-Henning Kamp static int
797a7ff7443SJohn Baldwin ktrops(td, p, ops, facs, vp)
798a7ff7443SJohn Baldwin 	struct thread *td;
799a7ff7443SJohn Baldwin 	struct proc *p;
800df8bae1dSRodney W. Grimes 	int ops, facs;
801df8bae1dSRodney W. Grimes 	struct vnode *vp;
802df8bae1dSRodney W. Grimes {
803ea3fc8e4SJohn Baldwin 	struct vnode *tracevp = NULL;
804a5881ea5SJohn Baldwin 	struct ucred *tracecred = NULL;
805df8bae1dSRodney W. Grimes 
806a7ff7443SJohn Baldwin 	PROC_LOCK(p);
807a7ff7443SJohn Baldwin 	if (!ktrcanset(td, p)) {
808a7ff7443SJohn Baldwin 		PROC_UNLOCK(p);
809df8bae1dSRodney W. Grimes 		return (0);
810a7ff7443SJohn Baldwin 	}
811ea3fc8e4SJohn Baldwin 	mtx_lock(&ktrace_mtx);
812df8bae1dSRodney W. Grimes 	if (ops == KTROP_SET) {
813a5881ea5SJohn Baldwin 		if (p->p_tracevp != vp) {
814df8bae1dSRodney W. Grimes 			/*
815a7ff7443SJohn Baldwin 			 * if trace file already in use, relinquish below
816df8bae1dSRodney W. Grimes 			 */
817a5881ea5SJohn Baldwin 			tracevp = p->p_tracevp;
818ea3fc8e4SJohn Baldwin 			VREF(vp);
819a5881ea5SJohn Baldwin 			p->p_tracevp = vp;
820a5881ea5SJohn Baldwin 		}
821a5881ea5SJohn Baldwin 		if (p->p_tracecred != td->td_ucred) {
822a5881ea5SJohn Baldwin 			tracecred = p->p_tracecred;
823a5881ea5SJohn Baldwin 			p->p_tracecred = crhold(td->td_ucred);
824df8bae1dSRodney W. Grimes 		}
825df8bae1dSRodney W. Grimes 		p->p_traceflag |= facs;
82632f9753cSRobert Watson 		if (priv_check(td, PRIV_KTRACE) == 0)
827df8bae1dSRodney W. Grimes 			p->p_traceflag |= KTRFAC_ROOT;
828df8bae1dSRodney W. Grimes 	} else {
829df8bae1dSRodney W. Grimes 		/* KTROP_CLEAR */
830df8bae1dSRodney W. Grimes 		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
831df8bae1dSRodney W. Grimes 			/* no more tracing */
832df8bae1dSRodney W. Grimes 			p->p_traceflag = 0;
833a5881ea5SJohn Baldwin 			tracevp = p->p_tracevp;
834a5881ea5SJohn Baldwin 			p->p_tracevp = NULL;
835a5881ea5SJohn Baldwin 			tracecred = p->p_tracecred;
836a5881ea5SJohn Baldwin 			p->p_tracecred = NULL;
837a7ff7443SJohn Baldwin 		}
838a7ff7443SJohn Baldwin 	}
839ea3fc8e4SJohn Baldwin 	mtx_unlock(&ktrace_mtx);
840a7ff7443SJohn Baldwin 	PROC_UNLOCK(p);
84164cc6a13SJohn Baldwin 	if (tracevp != NULL) {
842033eb86eSJeff Roberson 		int vfslocked;
843033eb86eSJeff Roberson 
844033eb86eSJeff Roberson 		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
845ea3fc8e4SJohn Baldwin 		vrele(tracevp);
846033eb86eSJeff Roberson 		VFS_UNLOCK_GIANT(vfslocked);
84764cc6a13SJohn Baldwin 	}
848a5881ea5SJohn Baldwin 	if (tracecred != NULL)
849a5881ea5SJohn Baldwin 		crfree(tracecred);
850df8bae1dSRodney W. Grimes 
851df8bae1dSRodney W. Grimes 	return (1);
852df8bae1dSRodney W. Grimes }
853df8bae1dSRodney W. Grimes 
85487b6de2bSPoul-Henning Kamp static int
855a7ff7443SJohn Baldwin ktrsetchildren(td, top, ops, facs, vp)
856a7ff7443SJohn Baldwin 	struct thread *td;
857a7ff7443SJohn Baldwin 	struct proc *top;
858df8bae1dSRodney W. Grimes 	int ops, facs;
859df8bae1dSRodney W. Grimes 	struct vnode *vp;
860df8bae1dSRodney W. Grimes {
861df8bae1dSRodney W. Grimes 	register struct proc *p;
862df8bae1dSRodney W. Grimes 	register int ret = 0;
863df8bae1dSRodney W. Grimes 
864df8bae1dSRodney W. Grimes 	p = top;
86564cc6a13SJohn Baldwin 	sx_assert(&proctree_lock, SX_LOCKED);
866df8bae1dSRodney W. Grimes 	for (;;) {
867a7ff7443SJohn Baldwin 		ret |= ktrops(td, p, ops, facs, vp);
868df8bae1dSRodney W. Grimes 		/*
869df8bae1dSRodney W. Grimes 		 * If this process has children, descend to them next,
870df8bae1dSRodney W. Grimes 		 * otherwise do any siblings, and if done with this level,
871df8bae1dSRodney W. Grimes 		 * follow back up the tree (but not past top).
872df8bae1dSRodney W. Grimes 		 */
8732e3c8fcbSPoul-Henning Kamp 		if (!LIST_EMPTY(&p->p_children))
8742e3c8fcbSPoul-Henning Kamp 			p = LIST_FIRST(&p->p_children);
875df8bae1dSRodney W. Grimes 		else for (;;) {
87664cc6a13SJohn Baldwin 			if (p == top)
877df8bae1dSRodney W. Grimes 				return (ret);
8782e3c8fcbSPoul-Henning Kamp 			if (LIST_NEXT(p, p_sibling)) {
8792e3c8fcbSPoul-Henning Kamp 				p = LIST_NEXT(p, p_sibling);
880df8bae1dSRodney W. Grimes 				break;
881df8bae1dSRodney W. Grimes 			}
882b75356e1SJeffrey Hsu 			p = p->p_pptr;
883df8bae1dSRodney W. Grimes 		}
884df8bae1dSRodney W. Grimes 	}
885df8bae1dSRodney W. Grimes 	/*NOTREACHED*/
886df8bae1dSRodney W. Grimes }
887df8bae1dSRodney W. Grimes 
88887b6de2bSPoul-Henning Kamp static void
8892c255e9dSRobert Watson ktr_writerequest(struct thread *td, struct ktr_request *req)
890df8bae1dSRodney W. Grimes {
891ea3fc8e4SJohn Baldwin 	struct ktr_header *kth;
892ea3fc8e4SJohn Baldwin 	struct vnode *vp;
893ea3fc8e4SJohn Baldwin 	struct proc *p;
894ea3fc8e4SJohn Baldwin 	struct ucred *cred;
895df8bae1dSRodney W. Grimes 	struct uio auio;
896ea3fc8e4SJohn Baldwin 	struct iovec aiov[3];
897f2a2857bSKirk McKusick 	struct mount *mp;
898ea3fc8e4SJohn Baldwin 	int datalen, buflen, vrele_count;
89933f19beeSJohn Baldwin 	int error, vfslocked;
900df8bae1dSRodney W. Grimes 
9012c255e9dSRobert Watson 	/*
9022c255e9dSRobert Watson 	 * We hold the vnode and credential for use in I/O in case ktrace is
9032c255e9dSRobert Watson 	 * disabled on the process as we write out the request.
9042c255e9dSRobert Watson 	 *
9052c255e9dSRobert Watson 	 * XXXRW: This is not ideal: we could end up performing a write after
9062c255e9dSRobert Watson 	 * the vnode has been closed.
9072c255e9dSRobert Watson 	 */
9082c255e9dSRobert Watson 	mtx_lock(&ktrace_mtx);
9092c255e9dSRobert Watson 	vp = td->td_proc->p_tracevp;
9102c255e9dSRobert Watson 	if (vp != NULL)
9112c255e9dSRobert Watson 		VREF(vp);
9122c255e9dSRobert Watson 	cred = td->td_proc->p_tracecred;
9132c255e9dSRobert Watson 	if (cred != NULL)
9142c255e9dSRobert Watson 		crhold(cred);
9152c255e9dSRobert Watson 	mtx_unlock(&ktrace_mtx);
9162c255e9dSRobert Watson 
917ea3fc8e4SJohn Baldwin 	/*
918ea3fc8e4SJohn Baldwin 	 * If vp is NULL, the vp has been cleared out from under this
9192c255e9dSRobert Watson 	 * request, so just drop it.  Make sure the credential and vnode are
9202c255e9dSRobert Watson 	 * in sync: we should have both or neither.
921ea3fc8e4SJohn Baldwin 	 */
9222c255e9dSRobert Watson 	if (vp == NULL) {
9232c255e9dSRobert Watson 		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
924df8bae1dSRodney W. Grimes 		return;
9252c255e9dSRobert Watson 	}
9262c255e9dSRobert Watson 	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
9272c255e9dSRobert Watson 
928ea3fc8e4SJohn Baldwin 	kth = &req->ktr_header;
9298b149b51SJohn Baldwin 	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
930ea3fc8e4SJohn Baldwin 	buflen = kth->ktr_len;
931df8bae1dSRodney W. Grimes 	auio.uio_iov = &aiov[0];
932df8bae1dSRodney W. Grimes 	auio.uio_offset = 0;
933df8bae1dSRodney W. Grimes 	auio.uio_segflg = UIO_SYSSPACE;
934df8bae1dSRodney W. Grimes 	auio.uio_rw = UIO_WRITE;
935df8bae1dSRodney W. Grimes 	aiov[0].iov_base = (caddr_t)kth;
936df8bae1dSRodney W. Grimes 	aiov[0].iov_len = sizeof(struct ktr_header);
937df8bae1dSRodney W. Grimes 	auio.uio_resid = sizeof(struct ktr_header);
938df8bae1dSRodney W. Grimes 	auio.uio_iovcnt = 1;
939ea3fc8e4SJohn Baldwin 	auio.uio_td = td;
940ea3fc8e4SJohn Baldwin 	if (datalen != 0) {
941ea3fc8e4SJohn Baldwin 		aiov[1].iov_base = (caddr_t)&req->ktr_data;
942ea3fc8e4SJohn Baldwin 		aiov[1].iov_len = datalen;
943ea3fc8e4SJohn Baldwin 		auio.uio_resid += datalen;
944df8bae1dSRodney W. Grimes 		auio.uio_iovcnt++;
945ea3fc8e4SJohn Baldwin 		kth->ktr_len += datalen;
946ea3fc8e4SJohn Baldwin 	}
947ea3fc8e4SJohn Baldwin 	if (buflen != 0) {
948d977a583SRobert Watson 		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
949d977a583SRobert Watson 		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
950ea3fc8e4SJohn Baldwin 		aiov[auio.uio_iovcnt].iov_len = buflen;
951ea3fc8e4SJohn Baldwin 		auio.uio_resid += buflen;
952ea3fc8e4SJohn Baldwin 		auio.uio_iovcnt++;
953b92584a6SJohn Baldwin 	}
9542c255e9dSRobert Watson 
95533f19beeSJohn Baldwin 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
956f2a2857bSKirk McKusick 	vn_start_write(vp, &mp, V_WAIT);
957cb05b60aSAttilio Rao 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
958ea3fc8e4SJohn Baldwin 	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
959467a273cSRobert Watson #ifdef MAC
96030d239bcSRobert Watson 	error = mac_vnode_check_write(cred, NOCRED, vp);
961467a273cSRobert Watson 	if (error == 0)
962467a273cSRobert Watson #endif
963ea3fc8e4SJohn Baldwin 		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
96422db15c0SAttilio Rao 	VOP_UNLOCK(vp, 0);
965f2a2857bSKirk McKusick 	vn_finished_write(mp);
966704c9f00SJohn Baldwin 	vrele(vp);
96733f19beeSJohn Baldwin 	VFS_UNLOCK_GIANT(vfslocked);
968df8bae1dSRodney W. Grimes 	if (!error)
969df8bae1dSRodney W. Grimes 		return;
970df8bae1dSRodney W. Grimes 	/*
971ea3fc8e4SJohn Baldwin 	 * If error encountered, give up tracing on this vnode.  We defer
972ea3fc8e4SJohn Baldwin 	 * all the vrele()'s on the vnode until after we are finished walking
973ea3fc8e4SJohn Baldwin 	 * the various lists to avoid needlessly holding locks.
974df8bae1dSRodney W. Grimes 	 */
975df8bae1dSRodney W. Grimes 	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
976df8bae1dSRodney W. Grimes 	    error);
977ea3fc8e4SJohn Baldwin 	vrele_count = 0;
978ea3fc8e4SJohn Baldwin 	/*
979ea3fc8e4SJohn Baldwin 	 * First, clear this vnode from being used by any processes in the
980ea3fc8e4SJohn Baldwin 	 * system.
981ea3fc8e4SJohn Baldwin 	 * XXX - If one process gets an EPERM writing to the vnode, should
982ea3fc8e4SJohn Baldwin 	 * we really do this?  Other processes might have suitable
983ea3fc8e4SJohn Baldwin 	 * credentials for the operation.
984ea3fc8e4SJohn Baldwin 	 */
985a5881ea5SJohn Baldwin 	cred = NULL;
9861005a129SJohn Baldwin 	sx_slock(&allproc_lock);
9874f506694SXin LI 	FOREACH_PROC_IN_SYSTEM(p) {
988ea3fc8e4SJohn Baldwin 		PROC_LOCK(p);
989a5881ea5SJohn Baldwin 		if (p->p_tracevp == vp) {
990ea3fc8e4SJohn Baldwin 			mtx_lock(&ktrace_mtx);
991a5881ea5SJohn Baldwin 			p->p_tracevp = NULL;
992df8bae1dSRodney W. Grimes 			p->p_traceflag = 0;
993a5881ea5SJohn Baldwin 			cred = p->p_tracecred;
994a5881ea5SJohn Baldwin 			p->p_tracecred = NULL;
995ea3fc8e4SJohn Baldwin 			mtx_unlock(&ktrace_mtx);
996ea3fc8e4SJohn Baldwin 			vrele_count++;
997df8bae1dSRodney W. Grimes 		}
998ea3fc8e4SJohn Baldwin 		PROC_UNLOCK(p);
999a5881ea5SJohn Baldwin 		if (cred != NULL) {
1000a5881ea5SJohn Baldwin 			crfree(cred);
1001a5881ea5SJohn Baldwin 			cred = NULL;
1002a5881ea5SJohn Baldwin 		}
1003df8bae1dSRodney W. Grimes 	}
10041005a129SJohn Baldwin 	sx_sunlock(&allproc_lock);
10052c255e9dSRobert Watson 
1006ea3fc8e4SJohn Baldwin 	/*
10072c255e9dSRobert Watson 	 * We can't clear any pending requests in threads that have cached
10082c255e9dSRobert Watson 	 * them but not yet committed them, as those are per-thread.  The
10092c255e9dSRobert Watson  * thread will have to clear them itself on system call return.
1010ea3fc8e4SJohn Baldwin 	 */
101133f19beeSJohn Baldwin 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1012ea3fc8e4SJohn Baldwin 	while (vrele_count-- > 0)
1013ea3fc8e4SJohn Baldwin 		vrele(vp);
101433f19beeSJohn Baldwin 	VFS_UNLOCK_GIANT(vfslocked);
1015df8bae1dSRodney W. Grimes }
1016df8bae1dSRodney W. Grimes 
1017df8bae1dSRodney W. Grimes /*
1018df8bae1dSRodney W. Grimes  * Return true if caller has permission to set the ktracing state
1019df8bae1dSRodney W. Grimes  * of target.  Essentially, the target can't possess any
1020df8bae1dSRodney W. Grimes  * more permissions than the caller.  KTRFAC_ROOT signifies that
1021df8bae1dSRodney W. Grimes  * root previously set the tracing status on the target process, and
1022df8bae1dSRodney W. Grimes  * so, only root may further change it.
1023df8bae1dSRodney W. Grimes  */
102487b6de2bSPoul-Henning Kamp static int
1025a7ff7443SJohn Baldwin ktrcanset(td, targetp)
1026a7ff7443SJohn Baldwin 	struct thread *td;
1027a7ff7443SJohn Baldwin 	struct proc *targetp;
1028df8bae1dSRodney W. Grimes {
1029df8bae1dSRodney W. Grimes 
1030a7ff7443SJohn Baldwin 	PROC_LOCK_ASSERT(targetp, MA_OWNED);
1031a0f75161SRobert Watson 	if (targetp->p_traceflag & KTRFAC_ROOT &&
103232f9753cSRobert Watson 	    priv_check(td, PRIV_KTRACE))
103375c13541SPoul-Henning Kamp 		return (0);
1034a0f75161SRobert Watson 
1035f44d9e24SJohn Baldwin 	if (p_candebug(td, targetp) != 0)
1036a0f75161SRobert Watson 		return (0);
1037a0f75161SRobert Watson 
1038df8bae1dSRodney W. Grimes 	return (1);
1039df8bae1dSRodney W. Grimes }
1040df8bae1dSRodney W. Grimes 
1041db6a20e2SGarrett Wollman #endif /* KTRACE */
1042