/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, the
 * next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
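
/*
 * Usage sketch (userspace, for illustration only; not part of this file):
 * a process can arrange to trace itself with the ktrace(2) system call and
 * later decode the records with kdump(1), e.g.
 *
 *	ktrace("ktrace.out", KTROP_SET,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, getpid());
 */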

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

FEATURE(ktrace, "Kernel support for system-call tracing");

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct	ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct	ktr_proc_ctor ktr_proc_ctor;
		struct	ktr_cap_fail ktr_cap_fail;
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
		struct	ktr_fault ktr_fault;
		struct	ktr_faultend ktr_faultend;
		struct	ktr_struct_array ktr_struct_array;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};
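
/*
 * data_lengths[] gives, for each record type, the size of the fixed-length
 * payload stored in ktr_data above; variable-length data such as pathnames,
 * I/O buffers and copied-in structures is carried separately in ktr_buffer
 * and accounted for in ktr_header.ktr_len.
 */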

static const int data_lengths[] = {
	[KTR_SYSCALL] = offsetof(struct ktr_syscall, ktr_args),
	[KTR_SYSRET] = sizeof(struct ktr_sysret),
	[KTR_NAMEI] = 0,
	[KTR_GENIO] = sizeof(struct ktr_genio),
	[KTR_PSIG] = sizeof(struct ktr_psig),
	[KTR_CSW] = sizeof(struct ktr_csw),
	[KTR_USER] = 0,
	[KTR_STRUCT] = 0,
	[KTR_SYSCTL] = 0,
	[KTR_PROCCTOR] = sizeof(struct ktr_proc_ctor),
	[KTR_PROCDTOR] = 0,
	[KTR_CAPFAIL] = sizeof(struct ktr_cap_fail),
	[KTR_FAULT] = sizeof(struct ktr_fault),
	[KTR_FAULTEND] = sizeof(struct ktr_faultend),
	[KTR_STRUCT_ARRAY] = sizeof(struct ktr_struct_array),
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

u_int ktr_geniosize = PAGE_SIZE;
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RWTUN, &ktr_geniosize,
    0, "Maximum size of genio event payload");

/*
 * Allow the traced process, in whose context the ktrace record is written,
 * to not be sent SIGXFSZ when the trace file reaches its size limit.  The
 * limit is taken from the process that set up ktrace, so killing the traced
 * process would not be completely fair.
 */
int ktr_filesize_limit_signal = 0;
SYSCTL_INT(_kern_ktrace, OID_AUTO, filesize_limit_signal, CTLFLAG_RWTUN,
    &ktr_filesize_limit_signal, 0,
    "Send SIGXFSZ to the traced process when the log size limit is exceeded");

static int print_message = 1;
static struct mtx ktrace_mtx;
static struct sx ktrace_sx;

struct ktr_io_params {
	struct vnode	*vp;
	struct ucred	*cr;
	off_t		lim;
	u_int		refs;
};

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static struct ktr_io_params *ktr_freeproc(struct proc *p);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static int ktrops(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static void ktrprocctor_entered(struct thread *, struct proc *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ast_ktrace(struct thread *td, int tda __unused)
{
	KTRUSERRET(td);
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK |
		    M_ZERO);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
	ast_register(TDA_KTRACE, ASTR_ASTF_REQUIRED, 0, ast_ktrace);
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		oldsize = ktr_requestpool;
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(oldsize, wantsize);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ktr_requestpool, 0,
    sysctl_kern_ktrace_request_pool, "IU",
    "Pool buffer size for ktrace(1)");
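
/*
 * Usage sketch (shell, for illustration only): the pool can be inspected or
 * resized at run time through the sysctl defined above, e.g.
 *
 *	sysctl kern.ktrace.request_pool=200
 *
 * or preset at boot via the kern.ktrace.request_pool loader tunable.
 */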

static u_int
ktrace_resize_pool(u_int oldsize, u_int newsize)
{
	STAILQ_HEAD(, ktr_request) ktr_new;
	struct ktr_request *req;
	int bound;

	print_message = 1;
	bound = newsize - oldsize;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0) {
		mtx_lock(&ktrace_mtx);
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				break;
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			free(req, M_KTRACE);
		}
	} else {
		/* Grow pool up to newsize. */
		STAILQ_INIT(&ktr_new);
		while (bound-- > 0) {
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK | M_ZERO);
			STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
		}
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&ktr_free, &ktr_new);
		ktr_requestpool += (newsize - oldsize);
	}
	mtx_unlock(&ktrace_mtx);
	return (ktr_requestpool);
}

/* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    (sizeof((struct thread *)NULL)->td_name));

static struct ktr_request *
ktr_getrequest_entered(struct thread *td, int type)
{
	struct ktr_request *req;
	struct proc *p = td->td_proc;
	int pm;

	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		nanotime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_type |= KTR_VERSIONED;
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		req->ktr_header.ktr_cpu = PCPU_GET(cpuid);
		req->ktr_header.ktr_version = KTR_VERSION1;
		bcopy(td->td_name, req->ktr_header.ktr_comm,
		    sizeof(req->ktr_header.ktr_comm));
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
	}
	return (req);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, type);
	if (req == NULL)
		ktrace_exit(td);

	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under these
 * circumstances, queue a request to the thread to be written asynchronously
 * later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ast_sched(td, TDA_KTRACE);
}

/*
 * Drain any pending ktrace records from the per-thread queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	ktr_freerequest_locked(req);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_freerequest_locked(struct ktr_request *req)
{

	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}
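
/*
 * ktr_io_params reference counting: references are taken and dropped with
 * ktrace_mtx held.  ktr_io_params_rele() hands the kiop back only when the
 * last reference goes away, so that the caller can release the vnode and
 * credentials via ktr_io_params_free() after dropping the mutex, since
 * closing the vnode may sleep.
 */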

static void
ktr_io_params_ref(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop->refs++;
}

static struct ktr_io_params *
ktr_io_params_rele(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (kiop == NULL)
		return (NULL);
	KASSERT(kiop->refs > 0, ("kiop ref == 0 %p", kiop));
	return (--(kiop->refs) == 0 ? kiop : NULL);
}

void
ktr_io_params_free(struct ktr_io_params *kiop)
{
	if (kiop == NULL)
		return;

	MPASS(kiop->refs == 0);
	vn_close(kiop->vp, FWRITE, kiop->cr, curthread);
	crfree(kiop->cr);
	free(kiop, M_KTRACE);
}

static struct ktr_io_params *
ktr_io_params_alloc(struct thread *td, struct vnode *vp)
{
	struct ktr_io_params *res;

	res = malloc(sizeof(struct ktr_io_params), M_KTRACE, M_WAITOK);
	res->vp = vp;
	res->cr = crhold(td->td_ucred);
	res->lim = lim_cur(td, RLIMIT_FSIZE);
	res->refs = 1;
	return (res);
}

/*
 * Disable tracing for a process and release all associated resources.
 * The caller is responsible for releasing a reference on the returned
 * vnode and credentials.
 */
static struct ktr_io_params *
ktr_freeproc(struct proc *p)
{
	struct ktr_io_params *kiop;
	struct ktr_request *req;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop = ktr_io_params_rele(p->p_ktrioparms);
	p->p_ktrioparms = NULL;
	p->p_traceflag = 0;
	while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
		STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
		ktr_freerequest_locked(req);
	}
	return (kiop);
}

struct vnode *
ktr_get_tracevp(struct proc *p, bool ref)
{
	struct vnode *vp;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (p->p_ktrioparms != NULL) {
		vp = p->p_ktrioparms->vp;
		if (ref)
			vrefact(vp);
	} else {
		vp = NULL;
	}
	return (vp);
}

void
ktrsyscall(int code, int narg, syscallarg_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = ((error == 0) ? retval: 0);	/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a setuid process execs, disable tracing.
 *
 * XXX: We toss any pending asynchronous records.
 */
struct ktr_io_params *
ktrprocexec(struct proc *p)
{
	struct ktr_io_params *kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	kiop = p->p_ktrioparms;
	if (kiop == NULL || priv_check_cred(kiop->cr, PRIV_DEBUG_DIFFCRED) == 0)
		return (NULL);

	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	return (kiop);
}

/*
 * When a process exits, drain per-process asynchronous trace records
 * and disable tracing.
 */
void
ktrprocexit(struct thread *td)
{
	struct ktr_request *req;
	struct proc *p;
	struct ktr_io_params *kiop;

	p = td->td_proc;
	if (p->p_traceflag == 0)
		return;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, KTR_PROCDTOR);
	if (req != NULL)
		ktr_enqueuerequest(td, req);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop);
	ktrace_exit(td);
}

static void
ktrprocctor_entered(struct thread *td, struct proc *p)
{
	struct ktr_proc_ctor *ktp;
	struct ktr_request *req;
	struct thread *td2;

	ktrace_assert(td);
	td2 = FIRST_THREAD_IN_PROC(p);
	req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_proc_ctor;
	ktp->sv_flags = p->p_sysent->sv_flags;
	ktr_enqueuerequest(td2, req);
}

void
ktrprocctor(struct proc *p)
{
	struct thread *td = curthread;

	if ((p->p_traceflag & KTRFAC_MASK) == 0)
		return;

	ktrace_enter(td);
	ktrprocctor_entered(td, p);
	ktrace_exit(td);
}

/*
 * When a process forks, enable tracing in the new process if needed.
 */
void
ktrprocfork(struct proc *p1, struct proc *p2)
{

	MPASS(p2->p_ktrioparms == NULL);
	MPASS(p2->p_traceflag == 0);

	if (p1->p_traceflag == 0)
		return;

	PROC_LOCK(p1);
	mtx_lock(&ktrace_mtx);
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_ktrioparms = p1->p_ktrioparms) != NULL)
			p1->p_ktrioparms->refs++;
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p1);

	ktrprocctor(p2);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

void
ktrnamei(const char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}
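
/*
 * Record the name of the sysctl being accessed.  The numeric OID passed in
 * is translated back to its dotted string name by prefixing it with the
 * sysctl "name" meta-node ({0, 1}, i.e. CTL_SYSCTL / CTL_SYSCTL_NAME) and
 * querying the sysctl tree from within the kernel.
 */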
void
ktrsysctl(int *name, u_int namelen)
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/* Lookup name of mib. */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
	    NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}

void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error != 0 && (rw == UIO_READ || error == EFAULT)) {
		freeuio(uio);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = MIN(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	freeuio(uio);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrcsw(int out, int user, const char *wmesg)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_csw *kc;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	if (wmesg != NULL)
		strlcpy(kc->wmesg, wmesg, sizeof(kc->wmesg));
	else
		bzero(kc->wmesg, sizeof(kc->wmesg));
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrstruct(const char *name, const void *data, size_t datalen)
{
	struct ktr_request *req;
	char *buf;
	size_t buflen, namelen;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	if (data == NULL)
		datalen = 0;
	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	bcopy(data, buf + namelen, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrstruct_error(const char *name, const void *data, size_t datalen, int error)
{

	if (error == 0)
		ktrstruct(name, data, datalen);
}

void
ktrstructarray(const char *name, enum uio_seg seg, const void *data,
    int num_items, size_t struct_size)
{
	struct ktr_request *req;
	struct ktr_struct_array *ksa;
	char *buf;
	size_t buflen, datalen, namelen;
	int max_items;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;
	if (num_items < 0)
		return;

	/* Trim array length to genio size. */
	max_items = ktr_geniosize / struct_size;
	if (num_items > max_items) {
		if (max_items == 0)
			num_items = 1;
		else
			num_items = max_items;
	}
	datalen = num_items * struct_size;

	if (data == NULL)
		datalen = 0;

	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	if (seg == UIO_SYSSPACE)
		bcopy(data, buf + namelen, datalen);
	else {
		if (copyin(data, buf + namelen, datalen) != 0) {
			free(buf, M_KTRACE);
			return;
		}
	}
	if ((req = ktr_getrequest(KTR_STRUCT_ARRAY)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ksa = &req->ktr_data.ktr_struct_array;
	ksa->struct_size = struct_size;
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrcapfail(enum ktr_cap_fail_type type, const cap_rights_t *needed,
    const cap_rights_t *held)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_cap_fail *kcf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CAPFAIL);
	if (req == NULL)
		return;
	kcf = &req->ktr_data.ktr_cap_fail;
	kcf->cap_type = type;
	if (needed != NULL)
		kcf->cap_needed = *needed;
	else
		cap_rights_init(&kcf->cap_needed);
	if (held != NULL)
		kcf->cap_held = *held;
	else
		cap_rights_init(&kcf->cap_held);
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfault(vm_offset_t vaddr, int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_fault *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULT);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_fault;
	kf->vaddr = vaddr;
	kf->type = type;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfaultend(int result)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_faultend *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULTEND);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_faultend;
	kf->result = result;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
sys_ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ktr_io_params *kiop, *old_kiop;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	kiop = NULL;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error)
			return (error);
		NDFREE_PNBUF(&nd);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void)vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			return (EACCES);
		}
		kiop = ktr_io_params_alloc(td, vp);
	}

	/*
	 * Clear all uses of the tracefile.
	 */
	ktrace_enter(td);
	if (ops == KTROP_CLEARFILE) {
restart:
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			old_kiop = NULL;
			PROC_LOCK(p);
			if (p->p_ktrioparms != NULL &&
			    p->p_ktrioparms->vp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					old_kiop = ktr_freeproc(p);
					mtx_unlock(&ktrace_mtx);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
			if (old_kiop != NULL) {
				sx_sunlock(&allproc_lock);
				ktr_io_params_free(old_kiop);
				goto restart;
			}
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}

		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		if (LIST_EMPTY(&pg->pg_members)) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, kiop);
			else
				ret |= ktrops(td, p, ops, facs, kiop);
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, kiop);
		else
			ret |= ktrops(td, p, ops, facs, kiop);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (kiop != NULL) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
sys_utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}
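
/*
 * Usage sketch (userspace, for illustration only; not part of this file):
 * an application can inject its own data into the trace stream with
 * utrace(2), e.g.
 *
 *	utrace("marker", sizeof("marker"));
 *
 * which shows up as a KTR_USER record in kdump(1) output.
 */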

#ifdef KTRACE
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct ktr_io_params *old_kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	if ((ops == KTROP_SET && p->p_state == PRS_NEW) ||
	    p_cansee(td, p) != 0) {
		/*
		 * Disallow setting trace points if the process is being born.
		 * This avoids races with trace point inheritance in
		 * ktrprocfork().
		 */
		PROC_UNLOCK(p);
		return (0);
	}
	if ((p->p_flag & P_WEXIT) != 0) {
		/*
		 * There's nothing to do if the process is exiting, but avoid
		 * signaling an error.
		 */
		PROC_UNLOCK(p);
		return (1);
	}
	old_kiop = NULL;
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_ktrioparms != NULL &&
		    p->p_ktrioparms->vp != new_kiop->vp) {
			/* if trace file already in use, relinquish below */
			old_kiop = ktr_io_params_rele(p->p_ktrioparms);
			p->p_ktrioparms = NULL;
		}
		if (p->p_ktrioparms == NULL) {
			p->p_ktrioparms = new_kiop;
			ktr_io_params_ref(new_kiop);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
			/* no more tracing */
			old_kiop = ktr_freeproc(p);
	}
	mtx_unlock(&ktrace_mtx);
	if ((p->p_traceflag & KTRFAC_MASK) != 0)
		ktrprocctor_entered(td, p);
	PROC_UNLOCK(p);
	ktr_io_params_free(old_kiop);

	return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct proc *p;
	int ret = 0;

	p = top;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, new_kiop);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
	/*NOTREACHED*/
}

static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_io_params *kiop, *kiop1;
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	off_t lim;
	int datalen, buflen;
	int error;

	p = td->td_proc;

	/*
	 * We reference the kiop for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 */
	mtx_lock(&ktrace_mtx);
	kiop = p->p_ktrioparms;

	/*
	 * If kiop is NULL, it has been cleared out from under this
	 * request, so just drop it.
	 */
	if (kiop == NULL) {
		mtx_unlock(&ktrace_mtx);
		return;
	}

	ktr_io_params_ref(kiop);
	vp = kiop->vp;
	cred = kiop->cr;
	lim = kiop->lim;

	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	mtx_unlock(&ktrace_mtx);

	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_TYPE) < nitems(data_lengths),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_TYPE];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	td->td_ktr_io_lim = lim;
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp);
	vn_finished_write(mp);
	if (error == 0) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
		return;
	}

	/*
	 * If error encountered, give up tracing on this vnode on this
	 * process.  Other processes might still be suitable for
	 * writes to this vnode.
	 */
	log(LOG_NOTICE,
	    "ktrace write failed, errno %d, tracing stopped for pid %d\n",
	    error, p->p_pid);

	kiop1 = NULL;
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	if (p->p_ktrioparms != NULL && p->p_ktrioparms->vp == vp)
		kiop1 = ktr_freeproc(p);
	kiop = ktr_io_params_rele(kiop);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop1);
	ktr_io_params_free(kiop);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */