19454b2d8SWarner Losh /*- 2df8bae1dSRodney W. Grimes * Copyright (c) 1989, 1993 32c255e9dSRobert Watson * The Regents of the University of California. 42c255e9dSRobert Watson * Copyright (c) 2005 Robert N. M. Watson 52c255e9dSRobert Watson * All rights reserved. 6df8bae1dSRodney W. Grimes * 7df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 8df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 9df8bae1dSRodney W. Grimes * are met: 10df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 11df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 12df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 13df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 14df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 15df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 16df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 17df8bae1dSRodney W. Grimes * without specific prior written permission. 18df8bae1dSRodney W. Grimes * 19df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25df8bae1dSRodney W. 
Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29df8bae1dSRodney W. Grimes * SUCH DAMAGE. 30df8bae1dSRodney W. Grimes * 31df8bae1dSRodney W. Grimes * @(#)kern_ktrace.c 8.2 (Berkeley) 9/23/93 32df8bae1dSRodney W. Grimes */ 33df8bae1dSRodney W. Grimes 34677b542eSDavid E. O'Brien #include <sys/cdefs.h> 35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 36677b542eSDavid E. O'Brien 37db6a20e2SGarrett Wollman #include "opt_ktrace.h" 38df8bae1dSRodney W. Grimes 39df8bae1dSRodney W. Grimes #include <sys/param.h> 40f23b4c91SGarrett Wollman #include <sys/systm.h> 41ea3fc8e4SJohn Baldwin #include <sys/fcntl.h> 42ea3fc8e4SJohn Baldwin #include <sys/kernel.h> 43ea3fc8e4SJohn Baldwin #include <sys/kthread.h> 44fb919e4dSMark Murray #include <sys/lock.h> 45fb919e4dSMark Murray #include <sys/mutex.h> 46ea3fc8e4SJohn Baldwin #include <sys/malloc.h> 47033eb86eSJeff Roberson #include <sys/mount.h> 48df8bae1dSRodney W. Grimes #include <sys/namei.h> 49acd3428bSRobert Watson #include <sys/priv.h> 50ea3fc8e4SJohn Baldwin #include <sys/proc.h> 51ea3fc8e4SJohn Baldwin #include <sys/unistd.h> 52df8bae1dSRodney W. Grimes #include <sys/vnode.h> 5360e15db9SDag-Erling Smørgrav #include <sys/socket.h> 5460e15db9SDag-Erling Smørgrav #include <sys/stat.h> 55df8bae1dSRodney W. Grimes #include <sys/ktrace.h> 561005a129SJohn Baldwin #include <sys/sx.h> 57ea3fc8e4SJohn Baldwin #include <sys/sysctl.h> 58*7705d4b2SDmitry Chagin #include <sys/sysent.h> 59df8bae1dSRodney W. Grimes #include <sys/syslog.h> 60ea3fc8e4SJohn Baldwin #include <sys/sysproto.h> 61df8bae1dSRodney W. 
Grimes 62aed55708SRobert Watson #include <security/mac/mac_framework.h> 63aed55708SRobert Watson 642c255e9dSRobert Watson /* 652c255e9dSRobert Watson * The ktrace facility allows the tracing of certain key events in user space 662c255e9dSRobert Watson * processes, such as system calls, signal delivery, context switches, and 672c255e9dSRobert Watson * user generated events using utrace(2). It works by streaming event 682c255e9dSRobert Watson * records and data to a vnode associated with the process using the 692c255e9dSRobert Watson * ktrace(2) system call. In general, records can be written directly from 702c255e9dSRobert Watson * the context that generates the event. One important exception to this is 712c255e9dSRobert Watson * during a context switch, where sleeping is not permitted. To handle this 722c255e9dSRobert Watson * case, trace events are generated using in-kernel ktr_request records, and 732c255e9dSRobert Watson * then delivered to disk at a convenient moment -- either immediately, the 742c255e9dSRobert Watson * next traceable event, at system call return, or at process exit. 752c255e9dSRobert Watson * 762c255e9dSRobert Watson * When dealing with multiple threads or processes writing to the same event 772c255e9dSRobert Watson * log, ordering guarantees are weak: specifically, if an event has multiple 782c255e9dSRobert Watson * records (i.e., system call enter and return), they may be interlaced with 792c255e9dSRobert Watson * records from another event. Process and thread ID information is provided 802c255e9dSRobert Watson * in the record, and user applications can de-interlace events if required. 
812c255e9dSRobert Watson */ 822c255e9dSRobert Watson 83a1c995b6SPoul-Henning Kamp static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE"); 8455166637SPoul-Henning Kamp 85db6a20e2SGarrett Wollman #ifdef KTRACE 86ea3fc8e4SJohn Baldwin 87de5b1952SAlexander Leidinger FEATURE(ktrace, "Kernel support for system-call tracing"); 88de5b1952SAlexander Leidinger 89ea3fc8e4SJohn Baldwin #ifndef KTRACE_REQUEST_POOL 90ea3fc8e4SJohn Baldwin #define KTRACE_REQUEST_POOL 100 91ea3fc8e4SJohn Baldwin #endif 92ea3fc8e4SJohn Baldwin 93ea3fc8e4SJohn Baldwin struct ktr_request { 94ea3fc8e4SJohn Baldwin struct ktr_header ktr_header; 95d977a583SRobert Watson void *ktr_buffer; 96ea3fc8e4SJohn Baldwin union { 97*7705d4b2SDmitry Chagin struct ktr_proc_ctor ktr_proc_ctor; 98ea3fc8e4SJohn Baldwin struct ktr_syscall ktr_syscall; 99ea3fc8e4SJohn Baldwin struct ktr_sysret ktr_sysret; 100ea3fc8e4SJohn Baldwin struct ktr_genio ktr_genio; 101ea3fc8e4SJohn Baldwin struct ktr_psig ktr_psig; 102ea3fc8e4SJohn Baldwin struct ktr_csw ktr_csw; 103ea3fc8e4SJohn Baldwin } ktr_data; 104ea3fc8e4SJohn Baldwin STAILQ_ENTRY(ktr_request) ktr_list; 105ea3fc8e4SJohn Baldwin }; 106ea3fc8e4SJohn Baldwin 107ea3fc8e4SJohn Baldwin static int data_lengths[] = { 108ea3fc8e4SJohn Baldwin 0, /* none */ 109ea3fc8e4SJohn Baldwin offsetof(struct ktr_syscall, ktr_args), /* KTR_SYSCALL */ 110ea3fc8e4SJohn Baldwin sizeof(struct ktr_sysret), /* KTR_SYSRET */ 111ea3fc8e4SJohn Baldwin 0, /* KTR_NAMEI */ 112ea3fc8e4SJohn Baldwin sizeof(struct ktr_genio), /* KTR_GENIO */ 113ea3fc8e4SJohn Baldwin sizeof(struct ktr_psig), /* KTR_PSIG */ 114ea3fc8e4SJohn Baldwin sizeof(struct ktr_csw), /* KTR_CSW */ 11560e15db9SDag-Erling Smørgrav 0, /* KTR_USER */ 11660e15db9SDag-Erling Smørgrav 0, /* KTR_STRUCT */ 117a56be37eSJohn Baldwin 0, /* KTR_SYSCTL */ 118*7705d4b2SDmitry Chagin sizeof(struct ktr_proc_ctor), /* KTR_PROCCTOR */ 119*7705d4b2SDmitry Chagin 0, /* KTR_PROCDTOR */ 120ea3fc8e4SJohn Baldwin }; 121ea3fc8e4SJohn Baldwin 122ea3fc8e4SJohn Baldwin 
static STAILQ_HEAD(, ktr_request) ktr_free; 123ea3fc8e4SJohn Baldwin 1245ece08f5SPoul-Henning Kamp static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options"); 12512301fc3SJohn Baldwin 1268b149b51SJohn Baldwin static u_int ktr_requestpool = KTRACE_REQUEST_POOL; 12712301fc3SJohn Baldwin TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool); 12812301fc3SJohn Baldwin 1298b149b51SJohn Baldwin static u_int ktr_geniosize = PAGE_SIZE; 13012301fc3SJohn Baldwin TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize); 13112301fc3SJohn Baldwin SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize, 13212301fc3SJohn Baldwin 0, "Maximum size of genio event payload"); 133ea3fc8e4SJohn Baldwin 134ea3fc8e4SJohn Baldwin static int print_message = 1; 135d680caabSJohn Baldwin static struct mtx ktrace_mtx; 1362c255e9dSRobert Watson static struct sx ktrace_sx; 137ea3fc8e4SJohn Baldwin 138ea3fc8e4SJohn Baldwin static void ktrace_init(void *dummy); 139ea3fc8e4SJohn Baldwin static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS); 140b4c20e5eSDmitry Chagin static u_int ktrace_resize_pool(u_int oldsize, u_int newsize); 141*7705d4b2SDmitry Chagin static struct ktr_request *ktr_getrequest_ne(struct thread *, int type); 142ea3fc8e4SJohn Baldwin static struct ktr_request *ktr_getrequest(int type); 143*7705d4b2SDmitry Chagin static void ktr_submitrequest_ne(struct thread *td, struct ktr_request *req); 1442c255e9dSRobert Watson static void ktr_submitrequest(struct thread *td, struct ktr_request *req); 145d680caabSJohn Baldwin static void ktr_freeproc(struct proc *p, struct ucred **uc, 146d680caabSJohn Baldwin struct vnode **vp); 147ea3fc8e4SJohn Baldwin static void ktr_freerequest(struct ktr_request *req); 148d680caabSJohn Baldwin static void ktr_freerequest_locked(struct ktr_request *req); 1492c255e9dSRobert Watson static void ktr_writerequest(struct thread *td, struct ktr_request *req); 150a7ff7443SJohn Baldwin static int ktrcanset(struct thread 
*,struct proc *); 151a7ff7443SJohn Baldwin static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *); 152a7ff7443SJohn Baldwin static int ktrops(struct thread *,struct proc *,int,int,struct vnode *); 153*7705d4b2SDmitry Chagin static void ktrprocctor_ne(struct thread *, struct proc *p); 15498d93822SBruce Evans 1552c255e9dSRobert Watson /* 1562c255e9dSRobert Watson * ktrace itself generates events, such as context switches, which we do not 1572c255e9dSRobert Watson * wish to trace. Maintain a flag, TDP_INKTRACE, on each thread to determine 1582c255e9dSRobert Watson * whether or not it is in a region where tracing of events should be 1592c255e9dSRobert Watson * suppressed. 1602c255e9dSRobert Watson */ 1612c255e9dSRobert Watson static void 1622c255e9dSRobert Watson ktrace_enter(struct thread *td) 1632c255e9dSRobert Watson { 1642c255e9dSRobert Watson 1652c255e9dSRobert Watson KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set")); 1662c255e9dSRobert Watson td->td_pflags |= TDP_INKTRACE; 1672c255e9dSRobert Watson } 1682c255e9dSRobert Watson 1692c255e9dSRobert Watson static void 1702c255e9dSRobert Watson ktrace_exit(struct thread *td) 1712c255e9dSRobert Watson { 1722c255e9dSRobert Watson 1732c255e9dSRobert Watson KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set")); 1742c255e9dSRobert Watson td->td_pflags &= ~TDP_INKTRACE; 1752c255e9dSRobert Watson } 1762c255e9dSRobert Watson 1772c255e9dSRobert Watson static void 1782c255e9dSRobert Watson ktrace_assert(struct thread *td) 1792c255e9dSRobert Watson { 1802c255e9dSRobert Watson 1812c255e9dSRobert Watson KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set")); 1822c255e9dSRobert Watson } 1832c255e9dSRobert Watson 184ea3fc8e4SJohn Baldwin static void 185ea3fc8e4SJohn Baldwin ktrace_init(void *dummy) 186df8bae1dSRodney W. Grimes { 187ea3fc8e4SJohn Baldwin struct ktr_request *req; 188ea3fc8e4SJohn Baldwin int i; 189df8bae1dSRodney W. 
Grimes 190ea3fc8e4SJohn Baldwin mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET); 1912c255e9dSRobert Watson sx_init(&ktrace_sx, "ktrace_sx"); 192ea3fc8e4SJohn Baldwin STAILQ_INIT(&ktr_free); 193ea3fc8e4SJohn Baldwin for (i = 0; i < ktr_requestpool; i++) { 194a163d034SWarner Losh req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK); 195ea3fc8e4SJohn Baldwin STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list); 196ea3fc8e4SJohn Baldwin } 197ea3fc8e4SJohn Baldwin } 198ea3fc8e4SJohn Baldwin SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL); 199ea3fc8e4SJohn Baldwin 200ea3fc8e4SJohn Baldwin static int 201ea3fc8e4SJohn Baldwin sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS) 202ea3fc8e4SJohn Baldwin { 203ea3fc8e4SJohn Baldwin struct thread *td; 2048b149b51SJohn Baldwin u_int newsize, oldsize, wantsize; 205ea3fc8e4SJohn Baldwin int error; 206ea3fc8e4SJohn Baldwin 207ea3fc8e4SJohn Baldwin /* Handle easy read-only case first to avoid warnings from GCC. */ 208ea3fc8e4SJohn Baldwin if (!req->newptr) { 209ea3fc8e4SJohn Baldwin oldsize = ktr_requestpool; 2108b149b51SJohn Baldwin return (SYSCTL_OUT(req, &oldsize, sizeof(u_int))); 211ea3fc8e4SJohn Baldwin } 212ea3fc8e4SJohn Baldwin 2138b149b51SJohn Baldwin error = SYSCTL_IN(req, &wantsize, sizeof(u_int)); 214ea3fc8e4SJohn Baldwin if (error) 215ea3fc8e4SJohn Baldwin return (error); 216ea3fc8e4SJohn Baldwin td = curthread; 2172c255e9dSRobert Watson ktrace_enter(td); 218ea3fc8e4SJohn Baldwin oldsize = ktr_requestpool; 219b4c20e5eSDmitry Chagin newsize = ktrace_resize_pool(oldsize, wantsize); 2202c255e9dSRobert Watson ktrace_exit(td); 2218b149b51SJohn Baldwin error = SYSCTL_OUT(req, &oldsize, sizeof(u_int)); 222ea3fc8e4SJohn Baldwin if (error) 223ea3fc8e4SJohn Baldwin return (error); 224a5896914SJoseph Koshy if (wantsize > oldsize && newsize < wantsize) 225ea3fc8e4SJohn Baldwin return (ENOSPC); 226ea3fc8e4SJohn Baldwin return (0); 227ea3fc8e4SJohn Baldwin } 22812301fc3SJohn Baldwin 
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW, 229a0c87b74SGavin Atkinson &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", 230a0c87b74SGavin Atkinson "Pool buffer size for ktrace(1)"); 231ea3fc8e4SJohn Baldwin 2328b149b51SJohn Baldwin static u_int 233b4c20e5eSDmitry Chagin ktrace_resize_pool(u_int oldsize, u_int newsize) 234ea3fc8e4SJohn Baldwin { 235b4c20e5eSDmitry Chagin STAILQ_HEAD(, ktr_request) ktr_new; 236ea3fc8e4SJohn Baldwin struct ktr_request *req; 237a5896914SJoseph Koshy int bound; 238ea3fc8e4SJohn Baldwin 239ea3fc8e4SJohn Baldwin print_message = 1; 240b4c20e5eSDmitry Chagin bound = newsize - oldsize; 241a5896914SJoseph Koshy if (bound == 0) 242a5896914SJoseph Koshy return (ktr_requestpool); 243b4c20e5eSDmitry Chagin if (bound < 0) { 244b4c20e5eSDmitry Chagin mtx_lock(&ktrace_mtx); 245ea3fc8e4SJohn Baldwin /* Shrink pool down to newsize if possible. */ 246a5896914SJoseph Koshy while (bound++ < 0) { 247ea3fc8e4SJohn Baldwin req = STAILQ_FIRST(&ktr_free); 248ea3fc8e4SJohn Baldwin if (req == NULL) 249b4c20e5eSDmitry Chagin break; 250ea3fc8e4SJohn Baldwin STAILQ_REMOVE_HEAD(&ktr_free, ktr_list); 251ea3fc8e4SJohn Baldwin ktr_requestpool--; 252ea3fc8e4SJohn Baldwin free(req, M_KTRACE); 253ea3fc8e4SJohn Baldwin } 254b4c20e5eSDmitry Chagin } else { 255ea3fc8e4SJohn Baldwin /* Grow pool up to newsize. 
*/ 256b4c20e5eSDmitry Chagin STAILQ_INIT(&ktr_new); 257a5896914SJoseph Koshy while (bound-- > 0) { 258ea3fc8e4SJohn Baldwin req = malloc(sizeof(struct ktr_request), M_KTRACE, 259a163d034SWarner Losh M_WAITOK); 260b4c20e5eSDmitry Chagin STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list); 261ea3fc8e4SJohn Baldwin } 262b4c20e5eSDmitry Chagin mtx_lock(&ktrace_mtx); 263b4c20e5eSDmitry Chagin STAILQ_CONCAT(&ktr_free, &ktr_new); 264b4c20e5eSDmitry Chagin ktr_requestpool += (newsize - oldsize); 265b4c20e5eSDmitry Chagin } 266b4c20e5eSDmitry Chagin mtx_unlock(&ktrace_mtx); 267ea3fc8e4SJohn Baldwin return (ktr_requestpool); 268ea3fc8e4SJohn Baldwin } 269ea3fc8e4SJohn Baldwin 2705ca4819dSJohn Baldwin /* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */ 2715ca4819dSJohn Baldwin CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) == 2725ca4819dSJohn Baldwin (sizeof((struct thread *)NULL)->td_name)); 2735ca4819dSJohn Baldwin 274ea3fc8e4SJohn Baldwin static struct ktr_request * 275*7705d4b2SDmitry Chagin ktr_getrequest_ne(struct thread *td, int type) 276ea3fc8e4SJohn Baldwin { 277ea3fc8e4SJohn Baldwin struct ktr_request *req; 278ea3fc8e4SJohn Baldwin struct proc *p = td->td_proc; 279ea3fc8e4SJohn Baldwin int pm; 280ea3fc8e4SJohn Baldwin 281c5c9bd5bSRobert Watson mtx_lock(&ktrace_mtx); 282ea3fc8e4SJohn Baldwin if (!KTRCHECK(td, type)) { 283c5c9bd5bSRobert Watson mtx_unlock(&ktrace_mtx); 284ea3fc8e4SJohn Baldwin return (NULL); 285ea3fc8e4SJohn Baldwin } 286ea3fc8e4SJohn Baldwin req = STAILQ_FIRST(&ktr_free); 287ea3fc8e4SJohn Baldwin if (req != NULL) { 288ea3fc8e4SJohn Baldwin STAILQ_REMOVE_HEAD(&ktr_free, ktr_list); 289ea3fc8e4SJohn Baldwin req->ktr_header.ktr_type = type; 29075768576SJohn Baldwin if (p->p_traceflag & KTRFAC_DROP) { 29175768576SJohn Baldwin req->ktr_header.ktr_type |= KTR_DROP; 29275768576SJohn Baldwin p->p_traceflag &= ~KTRFAC_DROP; 29375768576SJohn Baldwin } 294c5c9bd5bSRobert Watson mtx_unlock(&ktrace_mtx); 295ea3fc8e4SJohn Baldwin 
microtime(&req->ktr_header.ktr_time); 296ea3fc8e4SJohn Baldwin req->ktr_header.ktr_pid = p->p_pid; 2972bdeb3f9SRobert Watson req->ktr_header.ktr_tid = td->td_tid; 2985ca4819dSJohn Baldwin bcopy(td->td_name, req->ktr_header.ktr_comm, 2995ca4819dSJohn Baldwin sizeof(req->ktr_header.ktr_comm)); 300d977a583SRobert Watson req->ktr_buffer = NULL; 301ea3fc8e4SJohn Baldwin req->ktr_header.ktr_len = 0; 302ea3fc8e4SJohn Baldwin } else { 30375768576SJohn Baldwin p->p_traceflag |= KTRFAC_DROP; 304ea3fc8e4SJohn Baldwin pm = print_message; 305ea3fc8e4SJohn Baldwin print_message = 0; 306ea3fc8e4SJohn Baldwin mtx_unlock(&ktrace_mtx); 307ea3fc8e4SJohn Baldwin if (pm) 308ea3fc8e4SJohn Baldwin printf("Out of ktrace request objects.\n"); 309ea3fc8e4SJohn Baldwin } 310ea3fc8e4SJohn Baldwin return (req); 311ea3fc8e4SJohn Baldwin } 312ea3fc8e4SJohn Baldwin 313*7705d4b2SDmitry Chagin static struct ktr_request * 314*7705d4b2SDmitry Chagin ktr_getrequest(int type) 315*7705d4b2SDmitry Chagin { 316*7705d4b2SDmitry Chagin struct thread *td = curthread; 317*7705d4b2SDmitry Chagin struct ktr_request *req; 318*7705d4b2SDmitry Chagin 319*7705d4b2SDmitry Chagin ktrace_enter(td); 320*7705d4b2SDmitry Chagin req = ktr_getrequest_ne(td, type); 321*7705d4b2SDmitry Chagin if (req == NULL) 322*7705d4b2SDmitry Chagin ktrace_exit(td); 323*7705d4b2SDmitry Chagin 324*7705d4b2SDmitry Chagin return (req); 325*7705d4b2SDmitry Chagin } 326*7705d4b2SDmitry Chagin 3272c255e9dSRobert Watson /* 3282c255e9dSRobert Watson * Some trace generation environments don't permit direct access to VFS, 3292c255e9dSRobert Watson * such as during a context switch where sleeping is not allowed. Under these 3302c255e9dSRobert Watson * circumstances, queue a request to the thread to be written asynchronously 3312c255e9dSRobert Watson * later. 
3322c255e9dSRobert Watson */ 333ea3fc8e4SJohn Baldwin static void 3342c255e9dSRobert Watson ktr_enqueuerequest(struct thread *td, struct ktr_request *req) 335ea3fc8e4SJohn Baldwin { 336ea3fc8e4SJohn Baldwin 337ea3fc8e4SJohn Baldwin mtx_lock(&ktrace_mtx); 3382c255e9dSRobert Watson STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list); 339ea3fc8e4SJohn Baldwin mtx_unlock(&ktrace_mtx); 3402c255e9dSRobert Watson ktrace_exit(td); 3412c255e9dSRobert Watson } 3422c255e9dSRobert Watson 3432c255e9dSRobert Watson /* 3442c255e9dSRobert Watson * Drain any pending ktrace records from the per-thread queue to disk. This 3452c255e9dSRobert Watson * is used both internally before committing other records, and also on 3462c255e9dSRobert Watson * system call return. We drain all the ones we can find at the time when 3472c255e9dSRobert Watson * drain is requested, but don't keep draining after that as those events 348a56be37eSJohn Baldwin * may be approximately "after" the current event. 3492c255e9dSRobert Watson */ 3502c255e9dSRobert Watson static void 3512c255e9dSRobert Watson ktr_drain(struct thread *td) 3522c255e9dSRobert Watson { 3532c255e9dSRobert Watson struct ktr_request *queued_req; 3542c255e9dSRobert Watson STAILQ_HEAD(, ktr_request) local_queue; 3552c255e9dSRobert Watson 3562c255e9dSRobert Watson ktrace_assert(td); 3572c255e9dSRobert Watson sx_assert(&ktrace_sx, SX_XLOCKED); 3582c255e9dSRobert Watson 3592b3fb615SJohn Baldwin STAILQ_INIT(&local_queue); 3602c255e9dSRobert Watson 3612c255e9dSRobert Watson if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) { 3622c255e9dSRobert Watson mtx_lock(&ktrace_mtx); 3632c255e9dSRobert Watson STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr); 3642c255e9dSRobert Watson mtx_unlock(&ktrace_mtx); 3652c255e9dSRobert Watson 3662c255e9dSRobert Watson while ((queued_req = STAILQ_FIRST(&local_queue))) { 3672c255e9dSRobert Watson STAILQ_REMOVE_HEAD(&local_queue, ktr_list); 3682c255e9dSRobert Watson ktr_writerequest(td, queued_req); 3692c255e9dSRobert Watson 
ktr_freerequest(queued_req); 3702c255e9dSRobert Watson } 3712c255e9dSRobert Watson } 3722c255e9dSRobert Watson } 3732c255e9dSRobert Watson 3742c255e9dSRobert Watson /* 3752c255e9dSRobert Watson * Submit a trace record for immediate commit to disk -- to be used only 3762c255e9dSRobert Watson * where entering VFS is OK. First drain any pending records that may have 3772c255e9dSRobert Watson * been cached in the thread. 3782c255e9dSRobert Watson */ 3792c255e9dSRobert Watson static void 380*7705d4b2SDmitry Chagin ktr_submitrequest_ne(struct thread *td, struct ktr_request *req) 3812c255e9dSRobert Watson { 3822c255e9dSRobert Watson 3832c255e9dSRobert Watson ktrace_assert(td); 3842c255e9dSRobert Watson 3852c255e9dSRobert Watson sx_xlock(&ktrace_sx); 3862c255e9dSRobert Watson ktr_drain(td); 3872c255e9dSRobert Watson ktr_writerequest(td, req); 3882c255e9dSRobert Watson ktr_freerequest(req); 3892c255e9dSRobert Watson sx_xunlock(&ktrace_sx); 390*7705d4b2SDmitry Chagin } 3912c255e9dSRobert Watson 392*7705d4b2SDmitry Chagin static void 393*7705d4b2SDmitry Chagin ktr_submitrequest(struct thread *td, struct ktr_request *req) 394*7705d4b2SDmitry Chagin { 395*7705d4b2SDmitry Chagin 396*7705d4b2SDmitry Chagin ktrace_assert(td); 397*7705d4b2SDmitry Chagin ktr_submitrequest_ne(td, req); 3982c255e9dSRobert Watson ktrace_exit(td); 399ea3fc8e4SJohn Baldwin } 400ea3fc8e4SJohn Baldwin 401ea3fc8e4SJohn Baldwin static void 402ea3fc8e4SJohn Baldwin ktr_freerequest(struct ktr_request *req) 403ea3fc8e4SJohn Baldwin { 404ea3fc8e4SJohn Baldwin 405d680caabSJohn Baldwin mtx_lock(&ktrace_mtx); 406d680caabSJohn Baldwin ktr_freerequest_locked(req); 407d680caabSJohn Baldwin mtx_unlock(&ktrace_mtx); 408d680caabSJohn Baldwin } 409d680caabSJohn Baldwin 410d680caabSJohn Baldwin static void 411d680caabSJohn Baldwin ktr_freerequest_locked(struct ktr_request *req) 412d680caabSJohn Baldwin { 413d680caabSJohn Baldwin 414d680caabSJohn Baldwin mtx_assert(&ktrace_mtx, MA_OWNED); 415d977a583SRobert Watson if 
(req->ktr_buffer != NULL) 416d977a583SRobert Watson free(req->ktr_buffer, M_KTRACE); 417ea3fc8e4SJohn Baldwin STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list); 418d680caabSJohn Baldwin } 419d680caabSJohn Baldwin 420d680caabSJohn Baldwin /* 421d680caabSJohn Baldwin * Disable tracing for a process and release all associated resources. 422d680caabSJohn Baldwin * The caller is responsible for releasing a reference on the returned 423d680caabSJohn Baldwin * vnode and credentials. 424d680caabSJohn Baldwin */ 425d680caabSJohn Baldwin static void 426d680caabSJohn Baldwin ktr_freeproc(struct proc *p, struct ucred **uc, struct vnode **vp) 427d680caabSJohn Baldwin { 428d680caabSJohn Baldwin struct ktr_request *req; 429d680caabSJohn Baldwin 430d680caabSJohn Baldwin PROC_LOCK_ASSERT(p, MA_OWNED); 431d680caabSJohn Baldwin mtx_assert(&ktrace_mtx, MA_OWNED); 432d680caabSJohn Baldwin *uc = p->p_tracecred; 433d680caabSJohn Baldwin p->p_tracecred = NULL; 434d680caabSJohn Baldwin if (vp != NULL) 435d680caabSJohn Baldwin *vp = p->p_tracevp; 436d680caabSJohn Baldwin p->p_tracevp = NULL; 437d680caabSJohn Baldwin p->p_traceflag = 0; 438d680caabSJohn Baldwin while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) { 439d680caabSJohn Baldwin STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list); 440d680caabSJohn Baldwin ktr_freerequest_locked(req); 441d680caabSJohn Baldwin } 442ea3fc8e4SJohn Baldwin } 443ea3fc8e4SJohn Baldwin 44426f9a767SRodney W. Grimes void 445ea3fc8e4SJohn Baldwin ktrsyscall(code, narg, args) 44671ddfdbbSDmitrij Tejblum int code, narg; 44771ddfdbbSDmitrij Tejblum register_t args[]; 448df8bae1dSRodney W. Grimes { 449ea3fc8e4SJohn Baldwin struct ktr_request *req; 450df8bae1dSRodney W. Grimes struct ktr_syscall *ktp; 451ea3fc8e4SJohn Baldwin size_t buflen; 4524b3aac3dSJohn Baldwin char *buf = NULL; 453df8bae1dSRodney W. 
Grimes 4544b3aac3dSJohn Baldwin buflen = sizeof(register_t) * narg; 4554b3aac3dSJohn Baldwin if (buflen > 0) { 456a163d034SWarner Losh buf = malloc(buflen, M_KTRACE, M_WAITOK); 4574b3aac3dSJohn Baldwin bcopy(args, buf, buflen); 4584b3aac3dSJohn Baldwin } 459ea3fc8e4SJohn Baldwin req = ktr_getrequest(KTR_SYSCALL); 46050c22331SPoul-Henning Kamp if (req == NULL) { 46150c22331SPoul-Henning Kamp if (buf != NULL) 46250c22331SPoul-Henning Kamp free(buf, M_KTRACE); 463ea3fc8e4SJohn Baldwin return; 46450c22331SPoul-Henning Kamp } 465ea3fc8e4SJohn Baldwin ktp = &req->ktr_data.ktr_syscall; 466df8bae1dSRodney W. Grimes ktp->ktr_code = code; 467df8bae1dSRodney W. Grimes ktp->ktr_narg = narg; 468ea3fc8e4SJohn Baldwin if (buflen > 0) { 469ea3fc8e4SJohn Baldwin req->ktr_header.ktr_len = buflen; 470d977a583SRobert Watson req->ktr_buffer = buf; 471ea3fc8e4SJohn Baldwin } 4722c255e9dSRobert Watson ktr_submitrequest(curthread, req); 473df8bae1dSRodney W. Grimes } 474df8bae1dSRodney W. Grimes 47526f9a767SRodney W. Grimes void 476ea3fc8e4SJohn Baldwin ktrsysret(code, error, retval) 47771ddfdbbSDmitrij Tejblum int code, error; 47871ddfdbbSDmitrij Tejblum register_t retval; 479df8bae1dSRodney W. Grimes { 480ea3fc8e4SJohn Baldwin struct ktr_request *req; 481ea3fc8e4SJohn Baldwin struct ktr_sysret *ktp; 482df8bae1dSRodney W. Grimes 483ea3fc8e4SJohn Baldwin req = ktr_getrequest(KTR_SYSRET); 484ea3fc8e4SJohn Baldwin if (req == NULL) 485ea3fc8e4SJohn Baldwin return; 486ea3fc8e4SJohn Baldwin ktp = &req->ktr_data.ktr_sysret; 487ea3fc8e4SJohn Baldwin ktp->ktr_code = code; 488ea3fc8e4SJohn Baldwin ktp->ktr_error = error; 489ea3fc8e4SJohn Baldwin ktp->ktr_retval = retval; /* what about val2 ? */ 4902c255e9dSRobert Watson ktr_submitrequest(curthread, req); 4912c255e9dSRobert Watson } 4922c255e9dSRobert Watson 4932c255e9dSRobert Watson /* 494d680caabSJohn Baldwin * When a setuid process execs, disable tracing. 
495d680caabSJohn Baldwin * 496d680caabSJohn Baldwin * XXX: We toss any pending asynchronous records. 497d680caabSJohn Baldwin */ 498d680caabSJohn Baldwin void 499d680caabSJohn Baldwin ktrprocexec(struct proc *p, struct ucred **uc, struct vnode **vp) 500d680caabSJohn Baldwin { 501d680caabSJohn Baldwin 502d680caabSJohn Baldwin PROC_LOCK_ASSERT(p, MA_OWNED); 503d680caabSJohn Baldwin mtx_lock(&ktrace_mtx); 504d680caabSJohn Baldwin ktr_freeproc(p, uc, vp); 505d680caabSJohn Baldwin mtx_unlock(&ktrace_mtx); 506d680caabSJohn Baldwin } 507d680caabSJohn Baldwin 508d680caabSJohn Baldwin /* 509d680caabSJohn Baldwin * When a process exits, drain per-process asynchronous trace records 510d680caabSJohn Baldwin * and disable tracing. 5112c255e9dSRobert Watson */ 5122c255e9dSRobert Watson void 5132c255e9dSRobert Watson ktrprocexit(struct thread *td) 5142c255e9dSRobert Watson { 515*7705d4b2SDmitry Chagin struct ktr_request *req; 516d680caabSJohn Baldwin struct proc *p; 517d680caabSJohn Baldwin struct ucred *cred; 518d680caabSJohn Baldwin struct vnode *vp; 519d680caabSJohn Baldwin int vfslocked; 520d680caabSJohn Baldwin 521d680caabSJohn Baldwin p = td->td_proc; 522d680caabSJohn Baldwin if (p->p_traceflag == 0) 523d680caabSJohn Baldwin return; 5242c255e9dSRobert Watson 5252c255e9dSRobert Watson ktrace_enter(td); 5262c255e9dSRobert Watson sx_xlock(&ktrace_sx); 5272c255e9dSRobert Watson ktr_drain(td); 5282c255e9dSRobert Watson sx_xunlock(&ktrace_sx); 529*7705d4b2SDmitry Chagin req = ktr_getrequest_ne(td, KTR_PROCDTOR); 530*7705d4b2SDmitry Chagin if (req != NULL) 531*7705d4b2SDmitry Chagin ktr_submitrequest_ne(td, req); 532d680caabSJohn Baldwin PROC_LOCK(p); 533d680caabSJohn Baldwin mtx_lock(&ktrace_mtx); 534d680caabSJohn Baldwin ktr_freeproc(p, &cred, &vp); 535d680caabSJohn Baldwin mtx_unlock(&ktrace_mtx); 536d680caabSJohn Baldwin PROC_UNLOCK(p); 537d680caabSJohn Baldwin if (vp != NULL) { 538d680caabSJohn Baldwin vfslocked = VFS_LOCK_GIANT(vp->v_mount); 539d680caabSJohn Baldwin 
vrele(vp); 540d680caabSJohn Baldwin VFS_UNLOCK_GIANT(vfslocked); 541d680caabSJohn Baldwin } 542d680caabSJohn Baldwin if (cred != NULL) 543d680caabSJohn Baldwin crfree(cred); 5442c255e9dSRobert Watson ktrace_exit(td); 5452c255e9dSRobert Watson } 5462c255e9dSRobert Watson 547*7705d4b2SDmitry Chagin static void 548*7705d4b2SDmitry Chagin ktrprocctor_ne(struct thread *td, struct proc *p) 549*7705d4b2SDmitry Chagin { 550*7705d4b2SDmitry Chagin struct ktr_proc_ctor *ktp; 551*7705d4b2SDmitry Chagin struct ktr_request *req; 552*7705d4b2SDmitry Chagin struct thread *td2; 553*7705d4b2SDmitry Chagin 554*7705d4b2SDmitry Chagin ktrace_assert(td); 555*7705d4b2SDmitry Chagin td2 = FIRST_THREAD_IN_PROC(p); 556*7705d4b2SDmitry Chagin req = ktr_getrequest_ne(td2, KTR_PROCCTOR); 557*7705d4b2SDmitry Chagin if (req == NULL) 558*7705d4b2SDmitry Chagin return; 559*7705d4b2SDmitry Chagin 560*7705d4b2SDmitry Chagin ktp = &req->ktr_data.ktr_proc_ctor; 561*7705d4b2SDmitry Chagin ktp->sv_flags = p->p_sysent->sv_flags; 562*7705d4b2SDmitry Chagin ktr_submitrequest_ne(td, req); 563*7705d4b2SDmitry Chagin } 564*7705d4b2SDmitry Chagin 565*7705d4b2SDmitry Chagin void 566*7705d4b2SDmitry Chagin ktrprocctor(struct proc *p) 567*7705d4b2SDmitry Chagin { 568*7705d4b2SDmitry Chagin struct thread *td = curthread; 569*7705d4b2SDmitry Chagin 570*7705d4b2SDmitry Chagin if ((p->p_traceflag & KTRFAC_MASK) == 0) 571*7705d4b2SDmitry Chagin return; 572*7705d4b2SDmitry Chagin 573*7705d4b2SDmitry Chagin ktrace_enter(td); 574*7705d4b2SDmitry Chagin ktrprocctor_ne(td, p); 575*7705d4b2SDmitry Chagin ktrace_exit(td); 576*7705d4b2SDmitry Chagin } 577*7705d4b2SDmitry Chagin 5782c255e9dSRobert Watson /* 579d680caabSJohn Baldwin * When a process forks, enable tracing in the new process if needed. 
580d680caabSJohn Baldwin */ 581d680caabSJohn Baldwin void 582d680caabSJohn Baldwin ktrprocfork(struct proc *p1, struct proc *p2) 583d680caabSJohn Baldwin { 584d680caabSJohn Baldwin 585*7705d4b2SDmitry Chagin PROC_LOCK(p1); 586d680caabSJohn Baldwin mtx_lock(&ktrace_mtx); 587d680caabSJohn Baldwin KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode")); 588d680caabSJohn Baldwin if (p1->p_traceflag & KTRFAC_INHERIT) { 589d680caabSJohn Baldwin p2->p_traceflag = p1->p_traceflag; 590d680caabSJohn Baldwin if ((p2->p_tracevp = p1->p_tracevp) != NULL) { 591d680caabSJohn Baldwin VREF(p2->p_tracevp); 592d680caabSJohn Baldwin KASSERT(p1->p_tracecred != NULL, 593d680caabSJohn Baldwin ("ktrace vnode with no cred")); 594d680caabSJohn Baldwin p2->p_tracecred = crhold(p1->p_tracecred); 595d680caabSJohn Baldwin } 596d680caabSJohn Baldwin } 597d680caabSJohn Baldwin mtx_unlock(&ktrace_mtx); 598*7705d4b2SDmitry Chagin PROC_UNLOCK(p1); 599*7705d4b2SDmitry Chagin 600*7705d4b2SDmitry Chagin ktrprocctor(p2); 601d680caabSJohn Baldwin } 602d680caabSJohn Baldwin 603d680caabSJohn Baldwin /* 6042c255e9dSRobert Watson * When a thread returns, drain any asynchronous records generated by the 6052c255e9dSRobert Watson * system call. 6062c255e9dSRobert Watson */ 6072c255e9dSRobert Watson void 6082c255e9dSRobert Watson ktruserret(struct thread *td) 6092c255e9dSRobert Watson { 6102c255e9dSRobert Watson 6112c255e9dSRobert Watson ktrace_enter(td); 6122c255e9dSRobert Watson sx_xlock(&ktrace_sx); 6132c255e9dSRobert Watson ktr_drain(td); 6142c255e9dSRobert Watson sx_xunlock(&ktrace_sx); 6152c255e9dSRobert Watson ktrace_exit(td); 616df8bae1dSRodney W. Grimes } 617df8bae1dSRodney W. Grimes 61826f9a767SRodney W. Grimes void 619ea3fc8e4SJohn Baldwin ktrnamei(path) 620df8bae1dSRodney W. Grimes char *path; 621df8bae1dSRodney W. Grimes { 622ea3fc8e4SJohn Baldwin struct ktr_request *req; 623ea3fc8e4SJohn Baldwin int namelen; 6244b3aac3dSJohn Baldwin char *buf = NULL; 625df8bae1dSRodney W. 
	/* Copy the path before taking the request, so we never sleep in malloc
	 * while holding a request slot. */
	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

/*
 * Record a sysctl invocation (KTR_SYSCTL): translate the numeric MIB
 * into its dotted-name string and log that string.
 */
void
ktrsysctl(name, namelen)
	int *name;
	u_int namelen;
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/* Lookup name of mib. */
	/* The {0,1} prefix asks the sysctl tree for the name of the MIB. */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
	    NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}

/*
 * Record I/O data for a read/write on a file descriptor (KTR_GENIO).
 * Consumes (frees) the passed uio in all cases; at most ktr_geniosize
 * bytes of the transfer are captured.
 */
void
ktrgenio(fd, rw, uio, error)
	int fd;
	enum uio_rw rw;
	struct uio *uio;
	int error;
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	/* A failed transfer is not logged; just release the cloned uio. */
	if (error) {
		free(uio, M_IOV);
		return;
	}
	/* Rewind the cloned uio and copy out up to ktr_geniosize bytes. */
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

/*
 * Record delivery of a signal (KTR_PSIG): signal number, handler,
 * blocked mask and code.  The record is queued via ktr_enqueuerequest()
 * rather than written synchronously; it is drained later (see
 * ktruserret()).
 */
void
ktrpsig(sig, action, mask, code)
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(curthread, req);
}

/*
 * Record a context switch (KTR_CSW): whether the thread is switching
 * out, and whether the switch was voluntary from user mode.  Queued
 * asynchronously like KTR_PSIG above.
 */
void
ktrcsw(out, user)
	int out, user;
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(curthread, req);
}

/*
 * Record an arbitrary kernel structure (KTR_STRUCT).  The payload is
 * the NUL-terminated structure name followed by datalen bytes of the
 * structure itself.  A NULL data pointer logs the name alone.
 */
void
ktrstruct(name, data, datalen)
	const char *name;
	void *data;
	size_t datalen;
{
	struct ktr_request *req;
	char *buf = NULL;
	size_t buflen;

	if (!data)
		datalen = 0;
	buflen = strlen(name) + 1 + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	bcopy(data, buf + strlen(name) + 1, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char *fname;
	int ops;
	int facs;
	int pid;
};
#endif
/*
 * ktrace() system call: change the tracing state of a single process
 * (positive pid), a process tree (KTRFLAG_DESCEND), or a process group
 * (negative pid), or detach a trace file from every process using it
 * (KTROP_CLEARFILE).  Returns 0 on success or an errno value.
 */
/* ARGSUSED */
int
ktrace(td, uap)
	struct thread *td;
	register struct ktrace_args *uap;
{
#ifdef KTRACE
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;	/* userland may not set KTRFAC_ROOT */
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0, vfslocked;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE,
		    uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error) {
			ktrace_exit(td);
			return (error);
		}
		vfslocked = NDHASGIANT(&nd);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0);
		/* Only regular files may be used as trace files. */
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			VFS_UNLOCK_GIANT(vfslocked);
			ktrace_exit(td);
			return (EACCES);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		int vrele_count;

		vrele_count = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					ktr_freeproc(p, &cred, NULL);
					mtx_unlock(&ktrace_mtx);
					vrele_count++;
					crfree(cred);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		/* Drop the collected vnode references outside all locks. */
		if (vrele_count > 0) {
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			while (vrele_count-- > 0)
				vrele(vp);
			VFS_UNLOCK_GIANT(vfslocked);
		}
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			nfound++;
			/* ktrops()/ktrsetchildren() consume the proc lock. */
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL)
			error = ESRCH;
		else
			error = p_cansee(td, p);
		if (error) {
			/* pfind() returned the process locked on success. */
			if (p != NULL)
				PROC_UNLOCK(p);
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/*
 * utrace() system call: append up to KTR_USER_MAXLEN bytes of
 * user-supplied data to the trace stream as a KTR_USER record.
 * Returns 0 on success, EINVAL/ENOMEM/copyin errno on failure.
 */
/* ARGSUSED */
int
utrace(td, uap)
	struct thread *td;
	register struct utrace_args *uap;
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
/*
 * Apply a KTROP_SET or KTROP_CLEAR operation to a single process.
 * Called with the proc locked; the proc lock is always released before
 * returning.  Returns 1 if the operation was applied (or the target is
 * exiting), 0 if the caller lacked permission (see ktrcanset()).
 */
static int
ktrops(td, p, ops, facs, vp)
	struct thread *td;
	struct proc *p;
	int ops, facs;
	struct vnode *vp;
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	if (p->p_flag & P_WEXIT) {
		/* If the process is exiting, just ignore it. */
		PROC_UNLOCK(p);
		return (1);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		/* Remember that root enabled tracing; see ktrcanset(). */
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
			/* no more tracing */
			ktr_freeproc(p, &tracecred, &tracevp);
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	/* Release the displaced vnode/credential outside of all locks. */
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	/* Tracing still enabled: emit a fresh KTR_PROCCTOR record. */
	if ((p->p_traceflag & KTRFAC_MASK) != 0)
		ktrprocctor_ne(td, p);

	return (1);
}

/*
 * Apply an operation to "top" and all of its descendants, walking the
 * process tree depth-first.  Caller holds proctree_lock and top's proc
 * lock; each visited proc's lock is consumed by ktrops().
 */
static int
ktrsetchildren(td, top, ops, facs, vp)
	struct thread *td;
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
	/*NOTREACHED*/
}

/*
 * Write a single trace record to the tracing process's trace vnode.
 * On a write error, tracing is disabled for every process using this
 * vnode.
 */
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error, vfslocked;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	cred = td->td_proc->p_tracecred;

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		mtx_unlock(&ktrace_mtx);
		return;
	}
	VREF(vp);
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	crhold(cred);
	mtx_unlock(&ktrace_mtx);

	/*
	 * Assemble the iovec: header first, then the fixed-size payload
	 * for this record type (if any), then the variable-length buffer
	 * (if any).
	 */
	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_DROP) <
	    sizeof(data_lengths) / sizeof(data_lengths[0]),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	crfree(cred);
	if (!error) {
		vrele(vp);
		VFS_UNLOCK_GIANT(vfslocked);
		return;
	}
	VFS_UNLOCK_GIANT(vfslocked);

	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 * NB: at this point we still hold the vnode reference that must
	 * not go away as we need the valid vnode to compare with. Thus let
	 * vrele_count start at 1 and the reference will be freed
	 * by the loop at the end after our last use of vp.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 1;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 *       we really do this?  Other processes might have suitable
	 *       credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			ktr_freeproc(p, &cred, NULL);
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		/* Drop the credential outside the proc lock. */
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	/* Now release all the deferred vnode references at once. */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	while (vrele_count-- > 0)
		vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(td, targetp)
	struct thread *td;
	struct proc *targetp;
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */