/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, the
 * next traceable event, at system call return, or at process exit.
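 *
 * As a rough illustration (the trace file name, pid and payload below are
 * made up), a tracer such as ktrace(1) enables tracing of an existing
 * process from user space with the ktrace(2) system call, and a traced
 * process can emit its own KTR_USER records with utrace(2):
 *
 *	if (ktrace("out.ktr", KTROP_SET,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET, pid) == -1)
 *		err(1, "ktrace");
 *
 *	struct mark { int phase; } m = { 1 };
 *	(void)utrace(&m, sizeof(m));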
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

FEATURE(ktrace, "Kernel support for system-call tracing");

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct	ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct	ktr_proc_ctor ktr_proc_ctor;
		struct	ktr_cap_fail ktr_cap_fail;
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
		struct	ktr_fault ktr_fault;
		struct	ktr_faultend ktr_faultend;
		struct	ktr_struct_array ktr_struct_array;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static const int data_lengths[] = {
	[KTR_SYSCALL] = offsetof(struct ktr_syscall, ktr_args),
	[KTR_SYSRET] = sizeof(struct ktr_sysret),
	[KTR_NAMEI] = 0,
	[KTR_GENIO] = sizeof(struct ktr_genio),
	[KTR_PSIG] = sizeof(struct ktr_psig),
	[KTR_CSW] = sizeof(struct ktr_csw),
	[KTR_USER] = 0,
	[KTR_STRUCT] = 0,
	[KTR_SYSCTL] = 0,
	[KTR_PROCCTOR] = sizeof(struct ktr_proc_ctor),
	[KTR_PROCDTOR] = 0,
	[KTR_CAPFAIL] = sizeof(struct ktr_cap_fail),
	[KTR_FAULT] = sizeof(struct ktr_fault),
	[KTR_FAULTEND] = sizeof(struct ktr_faultend),
	[KTR_STRUCT_ARRAY] = sizeof(struct ktr_struct_array),
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

u_int ktr_geniosize = PAGE_SIZE;
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RWTUN, &ktr_geniosize,
    0, "Maximum size of genio event payload");

/*
 * Allow not sending SIGXFSZ to the traced process, in whose context the
 * ktr record is written.  The limit is applied from the process that
 * set up ktrace, so killing the traced process is not completely fair.
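 * With the default of 0 the signal is not sent.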
 */
int ktr_filesize_limit_signal = 0;
SYSCTL_INT(_kern_ktrace, OID_AUTO, filesize_limit_signal, CTLFLAG_RWTUN,
    &ktr_filesize_limit_signal, 0,
    "Send SIGXFSZ to the traced process when the log size limit is exceeded");

static int print_message = 1;
static struct mtx ktrace_mtx;
static struct sx ktrace_sx;

struct ktr_io_params {
	struct vnode	*vp;
	struct ucred	*cr;
	off_t		lim;
	u_int		refs;
};

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static struct ktr_io_params *ktr_freeproc(struct proc *p);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static int ktrops(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static void ktrprocctor_entered(struct thread *, struct proc *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ast_ktrace(struct thread *td, int tda __unused)
{
	KTRUSERRET(td);
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK |
		    M_ZERO);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
	ast_register(TDA_KTRACE, ASTR_ASTF_REQUIRED, 0, ast_ktrace);
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/*
	 * Handle easy read-only case first to avoid warnings from GCC.
	 */
	if (!req->newptr) {
		oldsize = ktr_requestpool;
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(oldsize, wantsize);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ktr_requestpool, 0,
    sysctl_kern_ktrace_request_pool, "IU",
    "Pool buffer size for ktrace(1)");

static u_int
ktrace_resize_pool(u_int oldsize, u_int newsize)
{
	STAILQ_HEAD(, ktr_request) ktr_new;
	struct ktr_request *req;
	int bound;

	print_message = 1;
	bound = newsize - oldsize;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0) {
		mtx_lock(&ktrace_mtx);
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				break;
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			free(req, M_KTRACE);
		}
	} else {
		/* Grow pool up to newsize. */
		STAILQ_INIT(&ktr_new);
		while (bound-- > 0) {
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK | M_ZERO);
			STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
		}
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&ktr_free, &ktr_new);
		ktr_requestpool += (newsize - oldsize);
	}
	mtx_unlock(&ktrace_mtx);
	return (ktr_requestpool);
}

/*
 * ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[].
 */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    (sizeof((struct thread *)NULL)->td_name));

static struct ktr_request *
ktr_getrequest_entered(struct thread *td, int type)
{
	struct ktr_request *req;
	struct proc *p = td->td_proc;
	int pm;

	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		nanotime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_type |= KTR_VERSIONED;
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		req->ktr_header.ktr_cpu = PCPU_GET(cpuid);
		req->ktr_header.ktr_version = KTR_VERSION1;
		bcopy(td->td_name, req->ktr_header.ktr_comm,
		    sizeof(req->ktr_header.ktr_comm));
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
	}
	return (req);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, type);
	if (req == NULL)
		ktrace_exit(td);

	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under these
 * circumstances, queue a request to the thread to be written asynchronously
 * later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ast_sched(td, TDA_KTRACE);
}

/*
 * Drain any pending ktrace records from the per-thread queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	ktr_freerequest_locked(req);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_freerequest_locked(struct ktr_request *req)
{

	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}

static void
ktr_io_params_ref(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop->refs++;
}

/*
 * Drop a reference on the I/O parameters.  Returns the kiop when the last
 * reference went away, so that the caller can free it after dropping
 * ktrace_mtx, or NULL if references remain.
 */
static struct ktr_io_params *
ktr_io_params_rele(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (kiop == NULL)
		return (NULL);
	KASSERT(kiop->refs > 0, ("kiop ref == 0 %p", kiop));
	return (--(kiop->refs) == 0 ? kiop : NULL);
}

void
ktr_io_params_free(struct ktr_io_params *kiop)
{
	if (kiop == NULL)
		return;

	MPASS(kiop->refs == 0);
	vn_close(kiop->vp, FWRITE, kiop->cr, curthread);
	crfree(kiop->cr);
	free(kiop, M_KTRACE);
}

static struct ktr_io_params *
ktr_io_params_alloc(struct thread *td, struct vnode *vp)
{
	struct ktr_io_params *res;

	res = malloc(sizeof(struct ktr_io_params), M_KTRACE, M_WAITOK);
	res->vp = vp;
	res->cr = crhold(td->td_ucred);
	res->lim = lim_cur(td, RLIMIT_FSIZE);
	res->refs = 1;
	return (res);
}

/*
 * Disable tracing for a process and release all associated resources.
 * The caller is responsible for releasing a reference on the returned
 * vnode and credentials.
 */
static struct ktr_io_params *
ktr_freeproc(struct proc *p)
{
	struct ktr_io_params *kiop;
	struct ktr_request *req;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop = ktr_io_params_rele(p->p_ktrioparms);
	p->p_ktrioparms = NULL;
	p->p_traceflag = 0;
	while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
		STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
		ktr_freerequest_locked(req);
	}
	return (kiop);
}

struct vnode *
ktr_get_tracevp(struct proc *p, bool ref)
{
	struct vnode *vp;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (p->p_ktrioparms != NULL) {
		vp = p->p_ktrioparms->vp;
		if (ref)
			vrefact(vp);
	} else {
		vp = NULL;
	}
	return (vp);
}

void
ktrsyscall(int code, int narg, syscallarg_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = ((error == 0) ? retval: 0);	/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a setuid process execs, disable tracing.
 *
 * XXX: We toss any pending asynchronous records.
 */
struct ktr_io_params *
ktrprocexec(struct proc *p)
{
	struct ktr_io_params *kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	kiop = p->p_ktrioparms;
	if (kiop == NULL ||
	    priv_check_cred(kiop->cr, PRIV_DEBUG_DIFFCRED) == 0)
		return (NULL);

	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	return (kiop);
}

/*
 * When a process exits, drain per-process asynchronous trace records
 * and disable tracing.
 */
void
ktrprocexit(struct thread *td)
{
	struct ktr_request *req;
	struct proc *p;
	struct ktr_io_params *kiop;

	p = td->td_proc;
	if (p->p_traceflag == 0)
		return;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, KTR_PROCDTOR);
	if (req != NULL)
		ktr_enqueuerequest(td, req);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop);
	ktrace_exit(td);
}

static void
ktrprocctor_entered(struct thread *td, struct proc *p)
{
	struct ktr_proc_ctor *ktp;
	struct ktr_request *req;
	struct thread *td2;

	ktrace_assert(td);
	td2 = FIRST_THREAD_IN_PROC(p);
	req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_proc_ctor;
	ktp->sv_flags = p->p_sysent->sv_flags;
	ktr_enqueuerequest(td2, req);
}

void
ktrprocctor(struct proc *p)
{
	struct thread *td = curthread;

	if ((p->p_traceflag & KTRFAC_MASK) == 0)
		return;

	ktrace_enter(td);
	ktrprocctor_entered(td, p);
	ktrace_exit(td);
}

/*
 * When a process forks, enable tracing in the new process if needed.
 */
void
ktrprocfork(struct proc *p1, struct proc *p2)
{

	MPASS(p2->p_ktrioparms == NULL);
	MPASS(p2->p_traceflag == 0);

	if (p1->p_traceflag == 0)
		return;

	PROC_LOCK(p1);
	mtx_lock(&ktrace_mtx);
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_ktrioparms = p1->p_ktrioparms) != NULL)
			p1->p_ktrioparms->refs++;
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p1);

	ktrprocctor(p2);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

void
ktrnamei(const char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysctl(int *name, u_int namelen)
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/*
	 * Lookup name of mib.
	 */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
	    NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}

void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = MIN(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrcsw(int out, int user, const char *wmesg)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_csw *kc;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	if (wmesg != NULL)
		strlcpy(kc->wmesg, wmesg, sizeof(kc->wmesg));
	else
		bzero(kc->wmesg, sizeof(kc->wmesg));
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrstruct(const char *name, const void *data, size_t datalen)
{
	struct ktr_request *req;
	char *buf;
	size_t buflen, namelen;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	if (data == NULL)
		datalen = 0;
	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	bcopy(data, buf + namelen, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrstruct_error(const char *name, const void *data, size_t datalen, int error)
{

	if (error == 0)
		ktrstruct(name, data, datalen);
}

void
ktrstructarray(const char *name, enum uio_seg seg, const void *data,
    int num_items, size_t struct_size)
{
	struct ktr_request *req;
	struct ktr_struct_array *ksa;
	char *buf;
	size_t buflen, datalen, namelen;
	int max_items;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;
	if (num_items < 0)
		return;

	/* Trim array length to genio size. */
	max_items = ktr_geniosize / struct_size;
	if (num_items > max_items) {
		if (max_items == 0)
			num_items = 1;
		else
			num_items = max_items;
	}
	datalen = num_items * struct_size;

	if (data == NULL)
		datalen = 0;

	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	if (seg == UIO_SYSSPACE)
		bcopy(data, buf + namelen, datalen);
	else {
		if (copyin(data, buf + namelen, datalen) != 0) {
			free(buf, M_KTRACE);
			return;
		}
	}
	if ((req = ktr_getrequest(KTR_STRUCT_ARRAY)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ksa = &req->ktr_data.ktr_struct_array;
	ksa->struct_size = struct_size;
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrcapfail(enum ktr_cap_fail_type type, const cap_rights_t *needed,
    const cap_rights_t *held)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_cap_fail *kcf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CAPFAIL);
	if (req == NULL)
		return;
	kcf = &req->ktr_data.ktr_cap_fail;
	kcf->cap_type = type;
	if (needed != NULL)
		kcf->cap_needed = *needed;
	else
		cap_rights_init(&kcf->cap_needed);
	if (held != NULL)
		kcf->cap_held = *held;
	else
		cap_rights_init(&kcf->cap_held);
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfault(vm_offset_t vaddr, int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_fault *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULT);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_fault;
	kf->vaddr = vaddr;
	kf->type = type;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfaultend(int result)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_faultend *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULTEND);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_faultend;
	kf->result = result;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
sys_ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ktr_io_params *kiop, *old_kiop;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	kiop = NULL;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error)
			return (error);
		NDFREE_PNBUF(&nd);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void)vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			return (EACCES);
		}
		kiop = ktr_io_params_alloc(td, vp);
	}

	/*
	 * Clear all uses of the tracefile.
	 */
	ktrace_enter(td);
	if (ops == KTROP_CLEARFILE) {
restart:
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			old_kiop = NULL;
			PROC_LOCK(p);
			if (p->p_ktrioparms != NULL &&
			    p->p_ktrioparms->vp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					old_kiop = ktr_freeproc(p);
					mtx_unlock(&ktrace_mtx);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
			if (old_kiop != NULL) {
				sx_sunlock(&allproc_lock);
				ktr_io_params_free(old_kiop);
				goto restart;
			}
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}

		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		if (LIST_EMPTY(&pg->pg_members)) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, kiop);
			else
				ret |= ktrops(td, p, ops, facs, kiop);
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, kiop);
		else
			ret |= ktrops(td, p, ops, facs, kiop);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (kiop != NULL) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
sys_utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct ktr_io_params *old_kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	if ((ops == KTROP_SET && p->p_state == PRS_NEW) ||
	    p_cansee(td, p) != 0) {
		/*
		 * Disallow setting trace points if the process is being born.
		 * This avoids races with trace point inheritance in
		 * ktrprocfork().
		 */
		PROC_UNLOCK(p);
		return (0);
	}
	if ((p->p_flag & P_WEXIT) != 0) {
		/*
		 * There's nothing to do if the process is exiting, but avoid
		 * signaling an error.
		 */
		PROC_UNLOCK(p);
		return (1);
	}
	old_kiop = NULL;
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_ktrioparms != NULL &&
		    p->p_ktrioparms->vp != new_kiop->vp) {
			/* if trace file already in use, relinquish below */
			old_kiop = ktr_io_params_rele(p->p_ktrioparms);
			p->p_ktrioparms = NULL;
		}
		if (p->p_ktrioparms == NULL) {
			p->p_ktrioparms = new_kiop;
			ktr_io_params_ref(new_kiop);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
			/* no more tracing */
			old_kiop = ktr_freeproc(p);
	}
	mtx_unlock(&ktrace_mtx);
	if ((p->p_traceflag & KTRFAC_MASK) != 0)
		ktrprocctor_entered(td, p);
	PROC_UNLOCK(p);
	ktr_io_params_free(old_kiop);

	return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct proc *p;
	int ret = 0;

	p = top;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, new_kiop);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
	/*NOTREACHED*/
}

static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_io_params *kiop, *kiop1;
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	off_t lim;
	int datalen, buflen;
	int error;

	p = td->td_proc;

	/*
	 * We reference the kiop for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 */
	mtx_lock(&ktrace_mtx);
	kiop = p->p_ktrioparms;

	/*
	 * If kiop is NULL, it has been cleared out from under this
	 * request, so just drop it.
	 */
	if (kiop == NULL) {
		mtx_unlock(&ktrace_mtx);
		return;
	}

	ktr_io_params_ref(kiop);
	vp = kiop->vp;
	cred = kiop->cr;
	lim = kiop->lim;

	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	mtx_unlock(&ktrace_mtx);

	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_TYPE) < nitems(data_lengths),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_TYPE];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	td->td_ktr_io_lim = lim;
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp);
	vn_finished_write(mp);
	if (error == 0) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
		return;
	}

	/*
	 * If error encountered, give up tracing on this vnode on this
	 * process.  Other processes might still be suitable for
	 * writes to this vnode.
	 */
	log(LOG_NOTICE,
	    "ktrace write failed, errno %d, tracing stopped for pid %d\n",
	    error, p->p_pid);

	kiop1 = NULL;
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	if (p->p_ktrioparms != NULL && p->p_ktrioparms->vp == vp)
		kiop1 = ktr_freeproc(p);
	kiop = ktr_io_params_rele(kiop);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop1);
	ktr_io_params_free(kiop);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */